From noreply at buildbot.pypy.org Mon Jul 1 08:38:27 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 1 Jul 2013 08:38:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: add major collections to test Message-ID: <20130701063827.AC92D1C3022@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r325:cf2cf2b3df21 Date: 2013-07-01 08:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/cf2cf2b3df21/ Log: add major collections to test diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -19,6 +19,7 @@ #define PREBUILT 3 // per thread #define MAXROOTS 1000 #define SHARED_ROOTS 5 // shared by threads +#define DO_MAJOR_COLLECTS 1 @@ -276,19 +277,25 @@ gcptr rare_events(gcptr p, gcptr _r, gcptr _sr) { - int k = get_rand(10); - if (k == 1) { + int k = get_rand(100); + if (k < 10) { push_roots(); stm_push_root(p); stm_become_inevitable("fun"); p = stm_pop_root(); pop_roots(); } - else if (k < 4) { + else if (k < 40) { push_roots(); stmgc_minor_collect(); pop_roots(); p = NULL; + } else if (k < 41 && DO_MAJOR_COLLECTS) { + fprintf(stdout, "major collect\n"); + push_roots(); + stmgcpage_possibly_major_collect(1); + pop_roots(); + p = NULL; } return p; } @@ -418,6 +425,7 @@ k = get_rand(9); check(p); + assert(thread_descriptor->active); if (k < 3) p = simple_events(p, _r, _sr); From noreply at buildbot.pypy.org Mon Jul 1 08:50:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 08:50:24 +0200 (CEST) Subject: [pypy-commit] pypy identity-set: close to-be-merged branch Message-ID: <20130701065024.6B3D71C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: identity-set Changeset: r65131:bacccee16ef3 Date: 2013-07-01 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/bacccee16ef3/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Mon Jul 1 08:50:25 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 08:50:25 +0200 (CEST) Subject: [pypy-commit] pypy default: (andrewchambers) merge identity-set. This branch adds a set based on Message-ID: <20130701065025.D54651C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65132:1d6d177a0c6e Date: 2013-07-01 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/1d6d177a0c6e/ Log: (andrewchambers) merge identity-set. This branch adds a set based on identity, just like identity dict. 
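A minimal usage sketch of when the new strategy is chosen (it assumes a PyPy build that includes this branch, with objspace.std.withidentitydict enabled; the strategy check via __pypy__.internal_repr follows the tests added below):

import __pypy__

class Plain(object):              # no custom __eq__/__cmp__/__hash__,
    pass                          # so instances compare by identity

class CustomEq(object):
    def __eq__(self, other):      # a custom __eq__ disables the fast path
        return True

s = set([Plain(), Plain()])
print 'IdentitySetStrategy' in __pypy__.internal_repr(s)    # True

s.add("foo")                      # mixing in a string falls back to ObjectSetStrategy
print 'IdentitySetStrategy' in __pypy__.internal_repr(s)    # False

s = set([CustomEq(), CustomEq()])
print 'IdentitySetStrategy' in __pypy__.internal_repr(s)    # False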
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -26,7 +26,7 @@ def __repr__(self): """representation for debugging purposes""" reprlist = [repr(w_item) for w_item in self.getkeys()] - return "<%s(%s)>" % (self.__class__.__name__, ', '.join(reprlist)) + return "<%s(%s)(%s)>" % (self.__class__.__name__, self.strategy, ', '.join(reprlist)) def from_storage_and_strategy(self, storage, strategy): obj = self._newobj(self.space, None) @@ -780,6 +780,8 @@ strategy = self.space.fromcache(StringSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) + elif self.space.type(w_key).compares_by_identity(): + strategy = self.space.fromcache(IdentitySetStrategy) else: strategy = self.space.fromcache(ObjectSetStrategy) w_set.strategy = strategy @@ -1336,6 +1338,41 @@ break d_obj[w_item] = None +class IdentitySetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("identityset") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def is_correct_type(self, w_key): + w_type = self.space.type(w_key) + return w_type.compares_by_identity() + + def may_contain_equal_elements(self, strategy): + #empty first, probably more likely + if strategy is self.space.fromcache(EmptySetStrategy): + return False + if strategy is self.space.fromcache(IntegerSetStrategy): + return False + if strategy is self.space.fromcache(StringSetStrategy): + return False + if strategy is self.space.fromcache(UnicodeSetStrategy): + return False + return True + + def unwrap(self, w_item): + return w_item + + def wrap(self, item): + return item + + def iter(self, w_set): + return IdentityIteratorImplementation(self.space, self, w_set) class IteratorImplementation(object): def __init__(self, space, strategy, implementation): @@ -1427,6 +1464,17 @@ else: return None +class IdentityIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, w_set): + IteratorImplementation.__init__(self, space, strategy, w_set) + d = strategy.unerase(w_set.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + for key in self.iterator: + return self.space.wrap(key) + else: + return None class RDictIteratorImplementation(IteratorImplementation): def __init__(self, space, strategy, w_set): @@ -1545,6 +1593,15 @@ w_set.strategy = space.fromcache(UnicodeSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return + + # check for compares by identity + for w_item in iterable_w: + if not space.type(w_item).compares_by_identity(): + break + else: + w_set.strategy = space.fromcache(IdentitySetStrategy) + w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return w_set.strategy = space.fromcache(ObjectSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_identityset.py @@ -0,0 +1,209 @@ +import py + + + +class AppTestIdentitySet(object): + + #needed for compares_by_identity + spaceconfig = {"objspace.std.withidentitydict": True} + + def setup_class(cls): + from pypy.objspace.std import identitydict + if cls.runappdirect: + py.test.skip("interp2app doesn't work on appdirect") + + def 
w_uses_strategy(self, s , obj): + import __pypy__ + return s in __pypy__.internal_repr(obj) + + def test_use_identity_strategy(self): + + class Plain(object): + pass + + class CustomEq(object): + def __eq__(self, other): + return True + + class CustomCmp (object): + def __cmp__(self, other): + return 0 + + class CustomHash(object): + def __hash__(self): + return 0 + + s = set() + + assert not self.uses_strategy('IdentitySetStrategy',s) + + s.add(Plain()) + + assert self.uses_strategy('IdentitySetStrategy',s) + + for cls in [CustomEq,CustomCmp,CustomHash]: + s = set() + s.add(cls()) + assert not self.uses_strategy('IdentitySetStrategy',s) + + + def test_use_identity_strategy_list(self): + + class X(object): + pass + + assert self.uses_strategy('IdentitySetStrategy',set([X(),X()])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) + + def test_identity_strategy_add(self): + + class X(object): + pass + + class NotIdent(object): + def __eq__(self,other): + pass + + s = set([X(),X()]) + s.add('foo') + assert not self.uses_strategy('IdentitySetStrategy',s) + s = set([X(),X()]) + s.add(NotIdent()) + assert not self.uses_strategy('IdentitySetStrategy',s) + + def test_identity_strategy_sanity(self): + + class X(object): + pass + + class Y(object): + pass + + a,b,c,d,e,f = X(),Y(),X(),Y(),X(),Y() + + s = set([a,b]).union(set([c])) + assert self.uses_strategy('IdentitySetStrategy',s) + assert set([a,b,c]) == s + s = set([a,b,c,d,e,f]) - set([d,e,f]) + assert self.uses_strategy('IdentitySetStrategy',s) + assert set([a,b,c]) == s + + + s = set([a]) + s.update([b,c]) + + assert s == set([a,b,c]) + + + def test_identity_strategy_iterators(self): + + class X(object): + pass + + s = set([X() for i in range(10)]) + counter = 0 + for item in s: + counter += 1 + assert item in s + + assert counter == 10 + + + def test_identity_strategy_other_cmp(self): + + #test tries to hit positive and negative in + # may_contain_equal_elements + + class X(object): + pass + + s = set([X() for i in range(10)]) + + assert s.intersection(set([1,2,3])) == set() + assert s.intersection(set(['a','b','c'])) == set() + assert s.intersection(set(['a','b','c'])) == set() + assert s.intersection(set([X(),X()])) == set() + + other = set(['a','b','c',s.__iter__().next()]) + intersect = s.intersection(other) + assert len(intersect) == 1 + assert intersect.__iter__().next() in s + assert intersect.__iter__().next() in other + + def test_class_monkey_patch(self): + + class X(object): + pass + + s = set() + + s.add(X()) + assert self.uses_strategy('IdentitySetStrategy',s) + X.__eq__ = lambda self,other : None + s.add(X()) + assert not self.uses_strategy('IdentitySetStrategy',s) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),X()])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) + + # An interesting case, add an instance, mutate the class, + # then add the same instance. + + class X(object): + pass + + s = set() + inst = X() + s.add(inst) + X.__eq__ = lambda x,y : x is y + s.add(inst) + + assert len(s) == 1 + assert s.__iter__().next() is inst + assert not self.uses_strategy('IdentitySetStrategy',s) + + + #Add instance, mutate class, check membership of that instance. 
+ + class X(object): + pass + + + inst = X() + s = set() + s.add(inst) + X.__eq__ = lambda x,y : x is y + assert inst in s + + # Test Wrong strategy + # If the strategy is changed by mutation, but the instance + # does not change, then this tests the methods that call + # may_contain_equal_elements still function. + # i.e. same instance in two sets, one with object strategy, one with + # identity strategy. + + class X(object): + pass + + + inst = X() + s1 = set() + s1.add(inst) + assert self.uses_strategy('IdentitySetStrategy',s1) + X.__eq__ = lambda x,y : x is y + s2 = set() + s2.add(inst) + assert not self.uses_strategy('IdentitySetStrategy',s2) + + assert s1.intersection(s2) == set([inst]) + assert (s1 - s2) == set() + assert (s2 - s1) == set() + + s1.difference_update(s2) + assert s1 == set() + + + From noreply at buildbot.pypy.org Mon Jul 1 08:51:07 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 08:51:07 +0200 (CEST) Subject: [pypy-commit] pypy default: document branch Message-ID: <20130701065107.AA2A21C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65133:8c5e26a49209 Date: 2013-07-01 08:50 +0200 http://bitbucket.org/pypy/pypy/changeset/8c5e26a49209/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -65,3 +65,5 @@ .. branch: ndarray-view Add view to ndarray and zeroD arrays, not on dtype scalars yet +.. branch: identity-set +Faster sets for objects From noreply at buildbot.pypy.org Mon Jul 1 09:15:26 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 09:15:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Some set speedups Message-ID: <20130701071526.5B4591C025F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65134:8a241c817172 Date: 2013-07-01 09:14 +0200 http://bitbucket.org/pypy/pypy/changeset/8a241c817172/ Log: Some set speedups diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -941,7 +941,12 @@ def equals(self, w_set, w_other): if w_set.length() != w_other.length(): return False + if w_set.length() == 0: + return True + # it's possible to have 0-lenght strategy that's not empty items = self.unerase(w_set.sstorage).keys() + if not self.may_contain_equal_elements(w_other.strategy): + return False for key in items: if not w_other.has_key(self.wrap(key)): return False @@ -1210,7 +1215,9 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1244,7 +1251,9 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1278,9 +1287,11 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(StringSetStrategy): return False - if strategy is self.space.fromcache(UnicodeSetStrategy): + elif strategy is self.space.fromcache(UnicodeSetStrategy): return False - if strategy is 
self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1342,7 +1353,7 @@ erase, unerase = rerased.new_erasing_pair("identityset") erase = staticmethod(erase) unerase = staticmethod(unerase) - + def get_empty_storage(self): return self.erase({}) @@ -1369,10 +1380,10 @@ return w_item def wrap(self, item): - return item - + return item + def iter(self, w_set): - return IdentityIteratorImplementation(self.space, self, w_set) + return IdentityIteratorImplementation(self.space, self, w_set) class IteratorImplementation(object): def __init__(self, space, strategy, implementation): @@ -1593,7 +1604,7 @@ w_set.strategy = space.fromcache(UnicodeSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return - + # check for compares by identity for w_item in iterable_w: if not space.type(w_item).compares_by_identity(): @@ -1601,7 +1612,7 @@ else: w_set.strategy = space.fromcache(IdentitySetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) - return + return w_set.strategy = space.fromcache(ObjectSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) From noreply at buildbot.pypy.org Mon Jul 1 11:19:10 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 1 Jul 2013 11:19:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix uninitialized shadowstack problem during major collection Message-ID: <20130701091910.B78781C0168@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r326:18b8edd35778 Date: 2013-07-01 11:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/18b8edd35778/ Log: fix uninitialized shadowstack problem during major collection diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1504,7 +1504,6 @@ revision_t i; struct tx_descriptor *d = stm_malloc(sizeof(struct tx_descriptor)); memset(d, 0, sizeof(struct tx_descriptor)); - stmgcpage_acquire_global_lock(); struct tx_public_descriptor *pd; i = descriptor_array_free_list; @@ -1554,7 +1553,6 @@ (long)d->public_descriptor_index, (long)pthread_self())); stmgcpage_init_tls(); - stmgcpage_release_global_lock(); return 1; } else @@ -1567,7 +1565,6 @@ struct tx_descriptor *d = thread_descriptor; assert(d != NULL); assert(d->active == 0); - stmgcpage_acquire_global_lock(); /* our nursery is empty at this point. The list 'stolen_objects' should have been emptied at the previous minor collection and @@ -1585,7 +1582,6 @@ if (d->tx_prev != NULL) d->tx_prev->tx_next = d->tx_next; if (d->tx_next != NULL) d->tx_next->tx_prev = d->tx_prev; if (d == stm_tx_head) stm_tx_head = d->tx_next; - stmgcpage_release_global_lock(); thread_descriptor = NULL; diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -625,6 +625,7 @@ if (d != saved) { /* Hack: temporarily pretend that we "are" the other thread... */ + assert(d->shadowstack_end_ref && *d->shadowstack_end_ref); thread_descriptor = d; stm_private_rev_num = *d->private_revision_ref; stm_read_barrier_cache = *d->read_barrier_cache_ref; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -198,7 +198,8 @@ return (revision_t)p; } - + /* XXX: think about if p->h_original needs a volatile read + and if we need a memory fence (smp_wmb())... 
*/ spinlock_acquire(d->public_descriptor->collection_lock, 'I'); /* old objects must have an h_original xOR be diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -79,12 +79,14 @@ void stm_initialize(void) { + stmgcpage_acquire_global_lock(); int r = DescriptorInit(); if (r != 1) stm_fatalerror("stm_initialize: DescriptorInit failure\n"); stmgc_init_nursery(); init_shadowstack(); //stmgcpage_init_tls(); + stmgcpage_release_global_lock(); BeginInevitableTransaction(); } @@ -92,10 +94,12 @@ { stmgc_minor_collect(); /* force everything out of the nursery */ CommitTransaction(); + stmgcpage_acquire_global_lock(); //stmgcpage_done_tls(); done_shadowstack(); stmgc_done_nursery(); DescriptorDone(); + stmgcpage_release_global_lock(); } /************************************************************/ From noreply at buildbot.pypy.org Mon Jul 1 11:48:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 11:48:03 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add asserts Message-ID: <20130701094803.EF67C1C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r327:07c9a1daeba2 Date: 2013-07-01 11:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/07c9a1daeba2/ Log: Add asserts diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -269,6 +269,7 @@ else { assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; + assert(!(B->h_tid & GCFLAG_STUB)); gcptrlist_insert(&objects_to_trace, B); if (!(B->h_tid & GCFLAG_PUBLIC)) { @@ -286,6 +287,7 @@ } } obj->h_tid |= GCFLAG_VISITED; + assert(!(obj->h_tid & GCFLAG_STUB)); gcptrlist_insert(&objects_to_trace, obj); } From noreply at buildbot.pypy.org Mon Jul 1 12:33:11 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 1 Jul 2013 12:33:11 +0200 (CEST) Subject: [pypy-commit] stmgc default: add atomic transactions in demo_random Message-ID: <20130701103311.C51A71C0296@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r328:6eac0903bcb9 Date: 2013-07-01 12:32 +0200 http://bitbucket.org/pypy/stmgc/changeset/6eac0903bcb9/ Log: add atomic transactions in demo_random diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -60,6 +60,7 @@ int num_roots_outside_perform; int steps_left; int interruptible; + int atomic; }; __thread struct thread_data td; @@ -74,6 +75,24 @@ (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); } +static void inc_atomic() +{ + assert(td.interruptible); + assert(stm_atomic(0) == td.atomic); + td.atomic++; + stm_atomic(1); + assert(stm_atomic(0) == td.atomic); +} + +static void dec_atomic() +{ + assert(td.interruptible); + assert(stm_atomic(0) == td.atomic); + td.atomic--; + stm_atomic(-1); + assert(stm_atomic(0) == td.atomic); +} + int get_rand(int max) { return (int)(rand_r(&td.thread_seed) % (unsigned int)max); @@ -254,7 +273,8 @@ td.thread_seed = default_seed++; td.steps_left = STEPS_PER_THREAD; td.interruptible = 0; - + td.atomic = 0; + td.num_roots = PREBUILT + NUMROOTS; for (i = 0; i < PREBUILT; i++) { if (i % 3 == 0) { @@ -303,7 +323,7 @@ gcptr simple_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr w_r; - int k = get_rand(8); + int k = get_rand(11); int num = get_rand(td.num_roots); switch (k) { case 0: // remove a root @@ -338,6 +358,18 @@ check(p); w_r->next = (struct node*)p; break; + case 8: + if (td.interruptible) { + inc_atomic(); + } + break; + case 9: + case 10: + /* more likely to be less atomic */ + if (td.atomic) { + dec_atomic(); + } + break; } return p; 
} @@ -469,7 +501,7 @@ td.num_roots = td.num_roots_outside_perform; // done & overwritten by the following pop_roots(): // copy_roots(td.roots_outside_perform, td.roots, td.num_roots); - + td.atomic = 0; // may be set differently on abort // refresh td.roots: gcptr end_marker = stm_pop_root(); assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); @@ -490,14 +522,26 @@ int run_me() { gcptr p = NULL; - while (td.steps_left-->0) { + while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); p = do_step(p); - if (p == (gcptr)-1) - return -1; + if (p == (gcptr)-1) { + if (td.atomic) { + // can't break, well, we could return to perform_transaction + // while being atomic. (TODO) + // may be true when major gc requested: + // assert(stm_should_break_transaction() == 0); + assert(stm_atomic(0) == td.atomic); + p = NULL; + } + else { + assert(stm_atomic(0) == 0); + return -1; + } + } } return 0; } diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -906,6 +906,8 @@ long stm_atomic(long delta) { struct tx_descriptor *d = thread_descriptor; + if (delta) // no atomic-checks + dprintf(("stm_atomic(%lu)\n", delta)); d->atomic += delta; assert(d->atomic >= 0); update_reads_size_limit(d); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -198,9 +198,6 @@ return (revision_t)p; } - /* XXX: think about if p->h_original needs a volatile read - and if we need a memory fence (smp_wmb())... */ - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); /* old objects must have an h_original xOR be the original itself. @@ -222,7 +219,6 @@ gcptr O = stmgc_duplicate_old(p); p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; - O->h_tid |= GCFLAG_PUBLIC; if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)p->h_revision; From noreply at buildbot.pypy.org Mon Jul 1 12:39:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 12:39:00 +0200 (CEST) Subject: [pypy-commit] stmgc default: Avoids doing this (I fixed the nursery to always really be null-allocated) Message-ID: <20130701103900.6AD6C1C0296@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r329:5b45c01aa560 Date: 2013-07-01 12:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/5b45c01aa560/ Log: Avoids doing this (I fixed the nursery to always really be null- allocated) diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -91,7 +91,7 @@ assert(tid == (tid & STM_USER_TID_MASK)); gcptr P = allocate_nursery(size, tid); P->h_revision = stm_private_rev_num; - P->h_original = 0; + /*P->h_original = 0; --- the object is null-initialized already */ return P; } From noreply at buildbot.pypy.org Mon Jul 1 12:39:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 12:39:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20130701103905.931B11C0296@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r330:bf175ef919bf Date: 2013-07-01 12:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/bf175ef919bf/ Log: merge heads diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -60,6 +60,7 @@ int num_roots_outside_perform; int steps_left; int interruptible; + int atomic; }; __thread struct thread_data td; @@ -74,6 +75,24 @@ (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); } +static void inc_atomic() +{ + assert(td.interruptible); + assert(stm_atomic(0) == td.atomic); + td.atomic++; + stm_atomic(1); + 
assert(stm_atomic(0) == td.atomic); +} + +static void dec_atomic() +{ + assert(td.interruptible); + assert(stm_atomic(0) == td.atomic); + td.atomic--; + stm_atomic(-1); + assert(stm_atomic(0) == td.atomic); +} + int get_rand(int max) { return (int)(rand_r(&td.thread_seed) % (unsigned int)max); @@ -254,7 +273,8 @@ td.thread_seed = default_seed++; td.steps_left = STEPS_PER_THREAD; td.interruptible = 0; - + td.atomic = 0; + td.num_roots = PREBUILT + NUMROOTS; for (i = 0; i < PREBUILT; i++) { if (i % 3 == 0) { @@ -303,7 +323,7 @@ gcptr simple_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr w_r; - int k = get_rand(8); + int k = get_rand(11); int num = get_rand(td.num_roots); switch (k) { case 0: // remove a root @@ -338,6 +358,18 @@ check(p); w_r->next = (struct node*)p; break; + case 8: + if (td.interruptible) { + inc_atomic(); + } + break; + case 9: + case 10: + /* more likely to be less atomic */ + if (td.atomic) { + dec_atomic(); + } + break; } return p; } @@ -469,7 +501,7 @@ td.num_roots = td.num_roots_outside_perform; // done & overwritten by the following pop_roots(): // copy_roots(td.roots_outside_perform, td.roots, td.num_roots); - + td.atomic = 0; // may be set differently on abort // refresh td.roots: gcptr end_marker = stm_pop_root(); assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); @@ -490,14 +522,26 @@ int run_me() { gcptr p = NULL; - while (td.steps_left-->0) { + while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); p = do_step(p); - if (p == (gcptr)-1) - return -1; + if (p == (gcptr)-1) { + if (td.atomic) { + // can't break, well, we could return to perform_transaction + // while being atomic. (TODO) + // may be true when major gc requested: + // assert(stm_should_break_transaction() == 0); + assert(stm_atomic(0) == td.atomic); + p = NULL; + } + else { + assert(stm_atomic(0) == 0); + return -1; + } + } } return 0; } diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -906,6 +906,8 @@ long stm_atomic(long delta) { struct tx_descriptor *d = thread_descriptor; + if (delta) // no atomic-checks + dprintf(("stm_atomic(%lu)\n", delta)); d->atomic += delta; assert(d->atomic >= 0); update_reads_size_limit(d); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -198,9 +198,6 @@ return (revision_t)p; } - /* XXX: think about if p->h_original needs a volatile read - and if we need a memory fence (smp_wmb())... */ - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); /* old objects must have an h_original xOR be the original itself. @@ -222,7 +219,6 @@ gcptr O = stmgc_duplicate_old(p); p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; - O->h_tid |= GCFLAG_PUBLIC; if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)p->h_revision; From noreply at buildbot.pypy.org Mon Jul 1 12:52:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 12:52:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix(?) the logic, I believe, and remove the recursion; but demo_random Message-ID: <20130701105249.23C121C0296@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r331:452c22b17f3e Date: 2013-07-01 12:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/452c22b17f3e/ Log: Fix(?) the logic, I believe, and remove the recursion; but demo_random crashes. To investigate. 
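The shape of the recursion removal, as a self-contained C sketch (the struct and field names here are invented for illustration, not the real stmgc objects): the newer-revision chain is followed with a goto-restart loop that updates the caller's pointer, instead of calling visit() recursively.

#include <stdio.h>

/* hypothetical stand-in for a gcptr: 'newer' models h_revision chains */
struct obj { struct obj *newer; int visited; };

static void visit(struct obj **pobj)
{
    struct obj *obj = *pobj;
 restart:
    if (obj->newer == NULL) {        /* most recent revision: mark it */
        obj->visited = 1;
    }
    else {                           /* follow the chain iteratively */
        obj = obj->newer;
        *pobj = obj;                 /* update the caller's reference */
        goto restart;                /* previously: visit(&obj) -- recursion */
    }
}

int main(void)
{
    struct obj a = { NULL, 0 }, b = { &a, 0 };
    struct obj *p = &b;
    visit(&p);
    printf("%d %d\n", a.visited, p == &a);   /* prints: 1 1 */
    return 0;
}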
diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -230,6 +230,10 @@ if (obj->h_revision & 1) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see also fix_outdated() */ + + obj->h_tid |= GCFLAG_VISITED; + assert(!(obj->h_tid & GCFLAG_STUB)); + gcptrlist_insert(&objects_to_trace, obj); } else if (obj->h_tid & GCFLAG_PUBLIC) { /* h_revision is a ptr: we have a more recent version */ @@ -269,26 +273,24 @@ else { assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; + assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); + + obj->h_tid |= GCFLAG_VISITED; + B->h_tid |= GCFLAG_VISITED; + assert(!(obj->h_tid & GCFLAG_STUB)); assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert(&objects_to_trace, B); + gcptrlist_insert2(&objects_to_trace, obj, B); - if (!(B->h_tid & GCFLAG_PUBLIC)) { - /* a regular private_from_protected object with a backup copy B */ - assert(B->h_tid & GCFLAG_BACKUP_COPY); - assert(B->h_revision & 1); - B->h_tid |= GCFLAG_VISITED; - } - else { - /* a private_from_protected with a stolen backup copy B */ + if (IS_POINTER(B->h_revision)) { + assert(B->h_tid & GCFLAG_PUBLIC); assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - gcptr obj1 = B; - visit(&obj1); /* xxx recursion? */ - obj->h_revision = (revision_t)obj1; + assert(!(B->h_revision & 2)); + + pobj = (gcptr *)&B->h_revision; + obj = *pobj; + goto restart; } } - obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_STUB)); - gcptrlist_insert(&objects_to_trace, obj); } static void visit_all_objects(void) From noreply at buildbot.pypy.org Mon Jul 1 13:05:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 13:05:03 +0200 (CEST) Subject: [pypy-commit] stmgc default: More fixes Message-ID: <20130701110503.9B1421C0296@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r332:874251ce1920 Date: 2013-07-01 13:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/874251ce1920/ Log: More fixes diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -219,21 +219,14 @@ return; restart: - if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ - } - if (obj->h_revision & 1) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see also fix_outdated() */ - - obj->h_tid |= GCFLAG_VISITED; assert(!(obj->h_tid & GCFLAG_STUB)); - gcptrlist_insert(&objects_to_trace, obj); + if (!(obj->h_tid & GCFLAG_VISITED)) { + obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ + obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, obj); + } } else if (obj->h_tid & GCFLAG_PUBLIC) { /* h_revision is a ptr: we have a more recent version */ @@ -270,6 +263,13 @@ *pobj = obj; goto restart; } + else if (obj->h_tid & GCFLAG_VISITED) { + dprintf(("[already visited: %p]\n", obj)); + assert(obj == *pobj); + assert((obj->h_revision & 3) || /* either odd, or stub */ + (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + return; /* already seen */ + } else { assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -91,7 +91,7 @@ assert(tid == (tid & STM_USER_TID_MASK)); gcptr P = allocate_nursery(size, tid); P->h_revision = 
stm_private_rev_num; - /*P->h_original = 0; --- the object is null-initialized already */ + assert(P->h_original == 0); /* null-initialized already */ return P; } From noreply at buildbot.pypy.org Mon Jul 1 14:44:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 14:44:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130701124434.A711C1C3498@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r333:e58e43f88445 Date: 2013-07-01 14:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/e58e43f88445/ Log: in-progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -764,20 +764,24 @@ long long elapsed_time; /* acquire the lock, but don't double-acquire it if already committing */ - if (d->public_descriptor->collection_lock != 'C') { - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - } - + if (d->public_descriptor->collection_lock != 'C') + { + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); + if (d->public_descriptor->stolen_objects.size != 0) + stm_normalize_stolen_objects(d); + assert(!stm_has_got_any_lock(d)); + } + else + { + CancelLocks(d); + assert(!stm_has_got_any_lock(d)); + } assert(d->active != 0); assert(!is_inevitable(d)); assert(num < ABORT_REASONS); d->num_aborts[num]++; - CancelLocks(d); - /* compute the elapsed time */ if (d->start_real_time.tv_nsec != -1 && clock_gettime(CLOCK_MONOTONIC, &now) >= 0) { @@ -954,6 +958,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); if (!g2l_any_entry(&d->public_to_private)) @@ -1032,6 +1037,46 @@ } G2L_LOOP_END; } +_Bool stm_has_got_any_lock(struct tx_descriptor *d) +{ + wlog_t *item; + int found_locked, found_unlocked; + + if (!g2l_any_entry(&d->public_to_private)) + return 0; + + found_locked = 0; + found_unlocked = 0; + + G2L_LOOP_FORWARD(d->public_to_private, item) + { + gcptr R = item->addr; + gcptr L = item->val; + if (L == NULL) + continue; + + revision_t expected, v = L->h_revision; + + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + expected = (revision_t)R; + else + expected = *d->private_revision_ref; + + if (v == expected) + { + assert(R->h_revision != d->my_lock); + found_unlocked = 1; + continue; + } + + found_locked = 1; + assert(found_unlocked == 0); /* an unlocked followed by a locked: no */ + + } G2L_LOOP_END; + + return found_locked; +} + static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; static void UpdateChainHeads(struct tx_descriptor *d, revision_t cur_time, diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -190,6 +190,7 @@ gcptr stm_get_read_obj(long); /* debugging */ void stm_clear_read_cache(void); /* debugging */ void _stm_test_forget_previous_state(void); /* debugging */ +_Bool stm_has_got_any_lock(struct tx_descriptor *); struct tx_public_descriptor *stm_get_free_public_descriptor(revision_t *); int DescriptorInit(void); diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -346,6 +346,7 @@ { struct tx_descriptor *d; for (d = stm_tx_head; d; d = d->tx_next) { + assert(!stm_has_got_any_lock(d)); /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); @@ -356,15 +357,24 @@ /* the current transaction's private copies of public objects */ wlog_t *item; + struct G2L new_public_to_private; + memset(&new_public_to_private, 0, 
sizeof(new_public_to_private)); + G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ - visit(&item->addr); - visit(&item->val); + gcptr key = item->addr; + gcptr val = item->val; + visit(&key); + visit(&val); + g2l_insert(&new_public_to_private, key, val); } G2L_LOOP_END; + g2l_delete_not_used_any_more(&d->public_to_private); + d->public_to_private = new_public_to_private; + /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); @@ -587,6 +597,7 @@ struct tx_descriptor *d; for (d = stm_tx_head; d; d = d->tx_next) { free_unused_local_pages(d->public_descriptor); + assert(!stm_has_got_any_lock(d)); } } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -536,6 +536,7 @@ { dprintf(("minor collection [%p to %p]\n", d->nursery_base, d->nursery_end)); + assert(!stm_has_got_any_lock(d)); /* acquire the "collection lock" first */ setup_minor_collect(d); @@ -562,6 +563,7 @@ with GCFLAG_OLD */ teardown_minor_collect(d); + assert(!stm_has_got_any_lock(d)); /* When doing minor collections with the nursery "mostly empty", as occurs when other threads force major collections but this From noreply at buildbot.pypy.org Mon Jul 1 15:02:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 15:02:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130701130214.6EC231C34A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r334:345ed0d7dc64 Date: 2013-07-01 15:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/345ed0d7dc64/ Log: in-progress diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -293,6 +293,20 @@ } } +static void visit_keep(gcptr obj) +{ + if (!(obj->h_tid & GCFLAG_VISITED)) { + obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ + obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, obj); + + if (IS_POINTER(obj->h_revision)) { + assert(!(obj->h_revision & 2)); + visit((gcptr *)&obj->h_revision); + } + } +} + static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { @@ -316,7 +330,6 @@ for (; pobj != pend; pobj++) { obj = *pobj; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - obj->h_tid &= ~GCFLAG_VISITED; assert(IS_POINTER(obj->h_revision)); visit((gcptr *)&obj->h_revision); } @@ -357,24 +370,14 @@ /* the current transaction's private copies of public objects */ wlog_t *item; - struct G2L new_public_to_private; - memset(&new_public_to_private, 0, sizeof(new_public_to_private)); - G2L_LOOP_FORWARD(d->public_to_private, item) { - /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ - gcptr key = item->addr; - gcptr val = item->val; - visit(&key); - visit(&val); - g2l_insert(&new_public_to_private, key, val); - + visit_keep(item->addr); + if (item->val != NULL) + visit_keep(item->val); } G2L_LOOP_END; - g2l_delete_not_used_any_more(&d->public_to_private); - d->public_to_private = new_public_to_private; - /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); From noreply at buildbot.pypy.org Mon Jul 1 15:07:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 15:07:44 +0200 (CEST) Subject: [pypy-commit] stmgc default: 
Fix: we must explicitly keep the property that L->h_revision==R that Message-ID: <20130701130744.718DB1C0168@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r335:bf56c12295c8 Date: 2013-07-01 15:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/bf56c12295c8/ Log: Fix: we must explicitly keep the property that L->h_revision==R that some pairs in public_to_private have got. diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -373,9 +373,20 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ - visit_keep(item->addr); - if (item->val != NULL) - visit_keep(item->val); + gcptr R = item->addr; + gcptr L = item->val; + visit_keep(R); + if (L != NULL) { + revision_t v = L->h_revision; + visit_keep(L); + /* a bit of custom logic here: if L->h_revision used to + point exactly to R, as set by stealing, then we must + keep this property, even though visit_keep(L) might + decide it would be better to make it point to a more + recent copy. */ + if (v == (revision_t)R) + L->h_revision = v; /* restore */ + } } G2L_LOOP_END; /* make sure that the other lists are empty */ From noreply at buildbot.pypy.org Mon Jul 1 15:14:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 15:14:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import stmgc/bf56c12295c8 Message-ID: <20130701131401.3AB9E1C0168@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65135:12aa03d05cb3 Date: 2013-07-01 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/12aa03d05cb3/ Log: import stmgc/bf56c12295c8 diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -765,20 +765,24 @@ long long elapsed_time; /* acquire the lock, but don't double-acquire it if already committing */ - if (d->public_descriptor->collection_lock != 'C') { - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); - if (d->public_descriptor->stolen_objects.size != 0) - stm_normalize_stolen_objects(d); - } - + if (d->public_descriptor->collection_lock != 'C') + { + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); + if (d->public_descriptor->stolen_objects.size != 0) + stm_normalize_stolen_objects(d); + assert(!stm_has_got_any_lock(d)); + } + else + { + CancelLocks(d); + assert(!stm_has_got_any_lock(d)); + } assert(d->active != 0); assert(!is_inevitable(d)); assert(num < ABORT_REASONS); d->num_aborts[num]++; - CancelLocks(d); - /* compute the elapsed time */ if (d->start_real_time.tv_nsec != -1 && clock_gettime(CLOCK_MONOTONIC, &now) >= 0) { @@ -907,6 +911,8 @@ long stm_atomic(long delta) { struct tx_descriptor *d = thread_descriptor; + if (delta) // no atomic-checks + dprintf(("stm_atomic(%lu)\n", delta)); d->atomic += delta; assert(d->atomic >= 0); update_reads_size_limit(d); @@ -953,6 +959,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); if (!g2l_any_entry(&d->public_to_private)) @@ -1031,6 +1038,46 @@ } G2L_LOOP_END; } +_Bool stm_has_got_any_lock(struct tx_descriptor *d) +{ + wlog_t *item; + int found_locked, found_unlocked; + + if (!g2l_any_entry(&d->public_to_private)) + return 0; + + found_locked = 0; + found_unlocked = 0; + + G2L_LOOP_FORWARD(d->public_to_private, item) + { + gcptr R = item->addr; + gcptr L = item->val; + if (L == NULL) + 
continue; + + revision_t expected, v = L->h_revision; + + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + expected = (revision_t)R; + else + expected = *d->private_revision_ref; + + if (v == expected) + { + assert(R->h_revision != d->my_lock); + found_unlocked = 1; + continue; + } + + found_locked = 1; + assert(found_unlocked == 0); /* an unlocked followed by a locked: no */ + + } G2L_LOOP_END; + + return found_locked; +} + static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; static void UpdateChainHeads(struct tx_descriptor *d, revision_t cur_time, @@ -1505,7 +1552,6 @@ revision_t i; struct tx_descriptor *d = stm_malloc(sizeof(struct tx_descriptor)); memset(d, 0, sizeof(struct tx_descriptor)); - stmgcpage_acquire_global_lock(); struct tx_public_descriptor *pd; i = descriptor_array_free_list; @@ -1555,7 +1601,6 @@ (long)d->public_descriptor_index, (long)pthread_self())); stmgcpage_init_tls(); - stmgcpage_release_global_lock(); return 1; } else @@ -1568,7 +1613,6 @@ struct tx_descriptor *d = thread_descriptor; assert(d != NULL); assert(d->active == 0); - stmgcpage_acquire_global_lock(); /* our nursery is empty at this point. The list 'stolen_objects' should have been emptied at the previous minor collection and @@ -1586,7 +1630,6 @@ if (d->tx_prev != NULL) d->tx_prev->tx_next = d->tx_next; if (d->tx_next != NULL) d->tx_next->tx_prev = d->tx_prev; if (d == stm_tx_head) stm_tx_head = d->tx_next; - stmgcpage_release_global_lock(); thread_descriptor = NULL; diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -191,6 +191,7 @@ gcptr stm_get_read_obj(long); /* debugging */ void stm_clear_read_cache(void); /* debugging */ void _stm_test_forget_previous_state(void); /* debugging */ +_Bool stm_has_got_any_lock(struct tx_descriptor *); struct tx_public_descriptor *stm_get_free_public_descriptor(revision_t *); int DescriptorInit(void); diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -220,17 +220,14 @@ return; restart: - if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ - } - if (obj->h_revision & 1) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see also fix_outdated() */ + assert(!(obj->h_tid & GCFLAG_STUB)); + if (!(obj->h_tid & GCFLAG_VISITED)) { + obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ + obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, obj); + } } else if (obj->h_tid & GCFLAG_PUBLIC) { /* h_revision is a ptr: we have a more recent version */ @@ -267,27 +264,48 @@ *pobj = obj; goto restart; } + else if (obj->h_tid & GCFLAG_VISITED) { + dprintf(("[already visited: %p]\n", obj)); + assert(obj == *pobj); + assert((obj->h_revision & 3) || /* either odd, or stub */ + (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + return; /* already seen */ + } else { assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; - gcptrlist_insert(&objects_to_trace, B); + assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - if (!(B->h_tid & GCFLAG_PUBLIC)) { - /* a regular private_from_protected object with a backup copy 
B */ - assert(B->h_tid & GCFLAG_BACKUP_COPY); - assert(B->h_revision & 1); - B->h_tid |= GCFLAG_VISITED; - } - else { - /* a private_from_protected with a stolen backup copy B */ + obj->h_tid |= GCFLAG_VISITED; + B->h_tid |= GCFLAG_VISITED; + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(B->h_tid & GCFLAG_STUB)); + gcptrlist_insert2(&objects_to_trace, obj, B); + + if (IS_POINTER(B->h_revision)) { + assert(B->h_tid & GCFLAG_PUBLIC); assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - gcptr obj1 = B; - visit(&obj1); /* xxx recursion? */ - obj->h_revision = (revision_t)obj1; + assert(!(B->h_revision & 2)); + + pobj = (gcptr *)&B->h_revision; + obj = *pobj; + goto restart; } } - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); +} + +static void visit_keep(gcptr obj) +{ + if (!(obj->h_tid & GCFLAG_VISITED)) { + obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ + obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, obj); + + if (IS_POINTER(obj->h_revision)) { + assert(!(obj->h_revision & 2)); + visit((gcptr *)&obj->h_revision); + } + } } static void visit_all_objects(void) @@ -313,7 +331,6 @@ for (; pobj != pend; pobj++) { obj = *pobj; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - obj->h_tid &= ~GCFLAG_VISITED; assert(IS_POINTER(obj->h_revision)); visit((gcptr *)&obj->h_revision); } @@ -343,6 +360,7 @@ { struct tx_descriptor *d; for (d = stm_tx_head; d; d = d->tx_next) { + assert(!stm_has_got_any_lock(d)); /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); @@ -354,12 +372,22 @@ /* the current transaction's private copies of public objects */ wlog_t *item; G2L_LOOP_FORWARD(d->public_to_private, item) { - /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ - visit(&item->addr); - visit(&item->val); - + gcptr R = item->addr; + gcptr L = item->val; + visit_keep(R); + if (L != NULL) { + revision_t v = L->h_revision; + visit_keep(L); + /* a bit of custom logic here: if L->h_revision used to + point exactly to R, as set by stealing, then we must + keep this property, even though visit_keep(L) might + decide it would be better to make it point to a more + recent copy. */ + if (v == (revision_t)R) + L->h_revision = v; /* restore */ + } } G2L_LOOP_END; /* make sure that the other lists are empty */ @@ -584,6 +612,7 @@ struct tx_descriptor *d; for (d = stm_tx_head; d; d = d->tx_next) { free_unused_local_pages(d->public_descriptor); + assert(!stm_has_got_any_lock(d)); } } @@ -626,6 +655,7 @@ if (d != saved) { /* Hack: temporarily pretend that we "are" the other thread... */ + assert(d->shadowstack_end_ref && *d->shadowstack_end_ref); thread_descriptor = d; stm_private_rev_num = *d->private_revision_ref; stm_read_barrier_cache = *d->read_barrier_cache_ref; diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -92,7 +92,7 @@ assert(tid == (tid & STM_USER_TID_MASK)); gcptr P = allocate_nursery(size, tid); P->h_revision = stm_private_rev_num; - P->h_original = 0; + assert(P->h_original == 0); /* null-initialized already */ return P; } @@ -199,8 +199,6 @@ return (revision_t)p; } - - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); /* old objects must have an h_original xOR be the original itself. 
@@ -222,7 +220,6 @@ gcptr O = stmgc_duplicate_old(p); p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; - O->h_tid |= GCFLAG_PUBLIC; if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)p->h_revision; @@ -540,6 +537,7 @@ { dprintf(("minor collection [%p to %p]\n", d->nursery_base, d->nursery_end)); + assert(!stm_has_got_any_lock(d)); /* acquire the "collection lock" first */ setup_minor_collect(d); @@ -566,6 +564,7 @@ with GCFLAG_OLD */ teardown_minor_collect(d); + assert(!stm_has_got_any_lock(d)); /* When doing minor collections with the nursery "mostly empty", as occurs when other threads force major collections but this diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -59eb9a85c3d1 +bf56c12295c8 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -80,12 +80,14 @@ void stm_initialize(void) { + stmgcpage_acquire_global_lock(); int r = DescriptorInit(); if (r != 1) stm_fatalerror("stm_initialize: DescriptorInit failure\n"); stmgc_init_nursery(); init_shadowstack(); //stmgcpage_init_tls(); + stmgcpage_release_global_lock(); BeginInevitableTransaction(); } @@ -93,10 +95,12 @@ { stmgc_minor_collect(); /* force everything out of the nursery */ CommitTransaction(); + stmgcpage_acquire_global_lock(); //stmgcpage_done_tls(); done_shadowstack(); stmgc_done_nursery(); DescriptorDone(); + stmgcpage_release_global_lock(); } /************************************************************/ From noreply at buildbot.pypy.org Mon Jul 1 15:32:54 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 15:32:54 +0200 (CEST) Subject: [pypy-commit] pypy default: a fast path for set equality (note that if len(s1) == len(s2), then s1.issubset(s2) -> s1 == s2) Message-ID: <20130701133254.527591C0296@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65136:588a9d71f1d6 Date: 2013-07-01 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/588a9d71f1d6/ Log: a fast path for set equality (note that if len(s1) == len(s2), then s1.issubset(s2) -> s1 == s2) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -944,9 +944,11 @@ if w_set.length() == 0: return True # it's possible to have 0-lenght strategy that's not empty - items = self.unerase(w_set.sstorage).keys() + if w_set.strategy is w_other.strategy: + return self._issubset_unwrapped(w_set, w_other) if not self.may_contain_equal_elements(w_other.strategy): return False + items = self.unerase(w_set.sstorage).keys() for key in items: if not w_other.has_key(self.wrap(key)): return False From noreply at buildbot.pypy.org Mon Jul 1 15:36:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 15:36:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Enough to have test_targetdemo compile (but not run yet). Message-ID: <20130701133619.4B0361C0296@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65137:aef695dc9da1 Date: 2013-07-01 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/aef695dc9da1/ Log: Enough to have test_targetdemo compile (but not run yet). 
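A plain-Python model of the callback protocol that make_perform_transaction sets up in the diff below (illustration only: no lltype/rffi casts, and the Container class here is invented; only the try/except-store-and-return-0 behaviour comes from rstm.py):

class Container(object):
    got_exception = None

def make_callback(func):
    def _stm_callback(container, retry_counter):
        try:
            return func(container, retry_counter)
        except Exception, e:
            container.got_exception = e
            return 0                  # 0 ends perform_transaction()
    return _stm_callback

def fails(container, retry_counter):
    return 1 // 0                     # any exception is captured, not propagated

cb = make_callback(fails)
c = Container()
assert cb(c, 0) == 0
assert isinstance(c.got_exception, ZeroDivisionError)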
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry @@ -37,73 +37,48 @@ stmgcintf.StmOperations.abort_and_retry() def before_external_call(): - if not is_atomic(): - e = get_errno() - llop.stm_stop_transaction(lltype.Void) - stmgcintf.StmOperations.commit_transaction() - set_errno(e) + llop.stm_commit_transaction(lltype.Void) before_external_call._dont_reach_me_in_del_ = True before_external_call._transaction_break_ = True def after_external_call(): - if not is_atomic(): - e = get_errno() - stmgcintf.StmOperations.begin_inevitable_transaction() - llop.stm_start_transaction(lltype.Void) - set_errno(e) + llop.stm_begin_inevitable_transaction(lltype.Void) after_external_call._dont_reach_me_in_del_ = True after_external_call._transaction_break_ = True def enter_callback_call(): - token = stmgcintf.StmOperations.descriptor_init() - if token != 1: - after_external_call() - else: - ll_assert(not is_atomic(), "new thread: is_atomic() != 0") - stmgcintf.StmOperations.begin_inevitable_transaction() - # the StmGCTLS is not built yet. leave it to gc_thread_start() - return token + # XXX assumes that we're not called in a fresh new thread + llop.stm_begin_inevitable_transaction(lltype.Void) + return 0 enter_callback_call._dont_reach_me_in_del_ = True enter_callback_call._transaction_break_ = True -def leave_callback_call(token): - if token != 1: - before_external_call() - else: - # the StmGCTLS is already destroyed, done by gc_thread_die() - # (we don't care if is_atomic() or not, we'll commit now) - stmgcintf.StmOperations.commit_transaction() - stmgcintf.StmOperations.descriptor_done() +def leave_callback_call(ignored): + llop.stm_commit_transaction(lltype.Void) leave_callback_call._dont_reach_me_in_del_ = True leave_callback_call._transaction_break_ = True # ____________________________________________________________ def make_perform_transaction(func, CONTAINERP): + from rpython.rtyper.annlowlevel import llhelper + from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr + from rpython.translator.stm.stmgcintf import CALLBACK_TX # def _stm_callback(llcontainer, retry_counter): - if not is_atomic(): - llop.stm_start_transaction(lltype.Void) llcontainer = rffi.cast(CONTAINERP, llcontainer) + retry_counter = rffi.cast(lltype.Signed, retry_counter) try: res = func(llcontainer, retry_counter) except Exception, e: - res = 0 # stop perform_transaction() and returns + res = 0 # ends perform_transaction() and returns lle = cast_instance_to_base_ptr(e) llcontainer.got_exception = lle - if not is_atomic(): - llop.stm_stop_transaction(lltype.Void) - return res + return rffi.cast(rffi.INT_real, res) # def perform_transaction(llcontainer): - before_external_call() - adr_of_top = llop.gc_adr_of_root_stack_top(llmemory.Address) - llcallback = llhelper(stmgcintf.StmOperations.CALLBACK_TX, - _stm_callback) - stmgcintf.StmOperations.perform_transaction(llcallback, llcontainer, - adr_of_top) - after_external_call() - keepalive_until_here(llcontainer) + llcallback = llhelper(CALLBACK_TX, _stm_callback) + llop.stm_perform_transaction(lltype.Void, llcontainer, llcallback) perform_transaction._transaction_break_ = True # return perform_transaction diff --git 
a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -440,6 +440,7 @@ 'stm_set_transaction_length': LLOp(), 'stm_change_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), + 'stm_perform_transaction':LLOp(), 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -603,6 +603,7 @@ OP_STM_GET_ATOMIC = _OP_STM OP_STM_THREADLOCAL_GET = _OP_STM OP_STM_THREADLOCAL_SET = _OP_STM + OP_STM_PERFORM_TRANSACTION = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -6,6 +6,7 @@ class StmHeaderOpaqueDefNode(Node): typetag = 'struct' + dependencies = () def __init__(self, db, T): Node.__init__(self, db) @@ -115,10 +116,10 @@ return '%s = stm_id((gcptr)%s);' % (result, arg0) def stm_commit_transaction(funcgen, op): - return 'stm_commit_transaction();' + return '{ int e = errno; stm_commit_transaction(); errno = e; }' def stm_begin_inevitable_transaction(funcgen, op): - return 'stm_begin_inevitable_transaction();' + return '{ int e = errno; stm_begin_inevitable_transaction(); errno = e; }' def stm_should_break_transaction(funcgen, op): result = funcgen.expr(op.result) @@ -145,6 +146,11 @@ arg0 = funcgen.expr(op.args[0]) return 'stm_thread_local_obj = (gcptr)%s;' % (arg0,) +def stm_perform_transaction(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + return 'stm_perform_transaction((gcptr)%s, %s);' % (arg0, arg1) + def op_stm(funcgen, op): func = globals()[op.opname] diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -107,12 +107,9 @@ # turn the following link into an "if False" link, add a new # "if True" link going to a fresh new block, and return this new # block. 
- funcptr = StmOperations.should_break_transaction - c = Constant(funcptr, lltype.typeOf(funcptr)) - v1 = varoftype(lltype.Signed) - block.operations.append(SpaceOperation('direct_call', [c], v1)) v2 = varoftype(lltype.Bool) - block.operations.append(SpaceOperation('int_is_true', [v1], v2)) + block.operations.append( + SpaceOperation('stm_should_break_transaction', [], v2)) # assert block.exitswitch is None [link] = block.exits diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -1,4 +1,5 @@ import os +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir as cdir2 @@ -30,3 +31,7 @@ pre_include_bits = ['#define RPY_STM 1'], separate_module_sources = [separate_source], ) + +GCPTR = lltype.Ptr(rffi.COpaque('struct stm_object_s')) +CALLBACK_TX = lltype.Ptr(lltype.FuncType([GCPTR, rffi.INT_real], + rffi.INT_real)) From noreply at buildbot.pypy.org Mon Jul 1 15:39:59 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Mon, 1 Jul 2013 15:39:59 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-ptp: implemented put and array.put Message-ID: <20130701133959.DCF651C010B@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: ndarray-ptp Changeset: r65138:a73ce1bfeeab Date: 2013-06-27 00:06 -0300 http://bitbucket.org/pypy/pypy/changeset/a73ce1bfeeab/ Log: implemented put and array.put diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -183,6 +183,7 @@ appleveldefs = {} interpleveldefs = { 'choose': 'interp_arrayops.choose', + 'put': 'interp_arrayops.put', 'repeat': 'interp_arrayops.repeat', } submodules = { diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -192,6 +192,45 @@ loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) return out + + at unwrap_spec(mode=str) +def put(space, w_arr, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy import constants + from pypy.module.micronumpy.support import int_w + arr = convert_to_array(space, w_arr) + if mode not in constants.MODES: + raise OperationError(space.w_ValueError, + space.wrap("mode %s not known" % (mode,))) + indices = convert_to_array(space, w_indices) + values = convert_to_array(space, w_values) + if not indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + dtype = arr.get_dtype() + val_iter = values.create_iter() + ind_iter = indices.create_iter() + while not ind_iter.done(): + index = int_w(space, ind_iter.getitem()) + if index < 0 or index >= arr.get_size(): + if constants.MODES[mode] == constants.MODE_RAISE: + raise OperationError(space.w_ValueError, space.wrap( + "invalid entry in choice array")) + elif constants.MODES[mode] == constants.MODE_WRAP: + index = index % arr.get_size() + else: + assert constants.MODES[mode] == constants.MODE_CLIP + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + arr.setitem(space, [index], val_iter.getitem().convert_to(dtype)) + ind_iter.next() + val_iter.next() + + def diagonal(space, arr, offset, axis1, axis2): shape = 
arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -550,9 +550,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - def descr_put(self, space, w_indices, w_values, w_mode='raise'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "put not implemented yet")) + @unwrap_spec(mode=str) + def descr_put(self, space, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy.interp_arrayops import put + put(space, self, w_indices, w_values, mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -939,6 +940,7 @@ prod = interp2app(W_NDimArray.descr_prod), max = interp2app(W_NDimArray.descr_max), min = interp2app(W_NDimArray.descr_min), + put = interp2app(W_NDimArray.descr_put), argmax = interp2app(W_NDimArray.descr_argmax), argmin = interp2app(W_NDimArray.descr_argmin), all = interp2app(W_NDimArray.descr_all), diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -132,3 +132,20 @@ x = array([0, 0, 0], dtype='i2') r = array([2, 1, 0]).choose([a, b, c], out=x) assert r.dtype == 'i2' + + def test_put_basic(self): + from numpypy import arange, array + a = arange(5) + a.put([0,2], [-44, -55]) + assert (a == array([-44, 1, -55, 3, 4])).all() + + def test_put_modes(self): + from numpypy import array, arange + a = arange(5) + a.put(22, -5, mode='clip') + assert (a == array([0, 1, 2, 3, -5])).all() + a = arange(5) + a.put(22, -5, mode='wrap') + assert (a == array([0, 1, -5, 3, 4])).all() + raises(ValueError, "arange(5).put(22, -5, mode='raise')") + raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") From noreply at buildbot.pypy.org Mon Jul 1 15:40:01 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Mon, 1 Jul 2013 15:40:01 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-ptp: refactored put and added more tests. Message-ID: <20130701134001.1CCCC1C010B@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: ndarray-ptp Changeset: r65139:935fc2578913 Date: 2013-06-28 00:54 -0300 http://bitbucket.org/pypy/pypy/changeset/935fc2578913/ Log: refactored put and added more tests. 
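For readers following this pair of changesets, the app-level behaviour being
added can be sketched in a few lines. Everything below is lifted straight from
the tests in these diffs (numpypy's arange/array and ndarray.put with its
mode argument); it is an illustration of the intended semantics, not extra API:

    from numpypy import arange, array

    a = arange(5)
    a.put([0, 2], [-44, -55])      # index 0 gets -44, index 2 gets -55
    assert (a == array([-44, 1, -55, 3, 4])).all()

    a = arange(5)
    a.put(22, -5, mode='clip')     # out-of-range index is clipped to the last slot
    assert (a == array([0, 1, 2, 3, -5])).all()

    a = arange(5)
    a.put(22, -5, mode='wrap')     # out-of-range index wraps around: 22 % 5 == 2
    assert (a == array([0, 1, -5, 3, 4])).all()

    # with the default mode='raise', an out-of-range index raises ValueError

The mode names and the wrap/clip arithmetic correspond to the MODE_RAISE,
MODE_WRAP and MODE_CLIP branches of the put() implementation in the diff below.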
diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -197,23 +197,35 @@ def put(space, w_arr, w_indices, w_values, mode='raise'): from pypy.module.micronumpy import constants from pypy.module.micronumpy.support import int_w + arr = convert_to_array(space, w_arr) + if mode not in constants.MODES: raise OperationError(space.w_ValueError, space.wrap("mode %s not known" % (mode,))) - indices = convert_to_array(space, w_indices) - values = convert_to_array(space, w_values) - if not indices: + if not w_indices: raise OperationError(space.w_ValueError, space.wrap("indice list cannot be empty")) - if not values: + if not w_values: raise OperationError(space.w_ValueError, space.wrap("value list cannot be empty")) + dtype = arr.get_dtype() - val_iter = values.create_iter() - ind_iter = indices.create_iter() - while not ind_iter.done(): - index = int_w(space, ind_iter.getitem()) + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = int_w(space, idx) + if index < 0 or index >= arr.get_size(): if constants.MODES[mode] == constants.MODE_RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -226,9 +238,13 @@ index = 0 else: index = arr.get_size() - 1 - arr.setitem(space, [index], val_iter.getitem().convert_to(dtype)) - ind_iter.next() - val_iter.next() + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) def diagonal(space, arr, offset, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -136,8 +136,14 @@ def test_put_basic(self): from numpypy import arange, array a = arange(5) - a.put([0,2], [-44, -55]) + a.put([0, 2], [-44, -55]) assert (a == array([-44, 1, -55, 3, 4])).all() + a = arange(5) + a.put([3, 4], 9) + assert (a == array([0, 1, 2, 9, 9])).all() + a = arange(5) + a.put(1, [7, 8]) + assert (a == array([0, 7, 2, 3, 4])).all() def test_put_modes(self): from numpypy import array, arange From noreply at buildbot.pypy.org Mon Jul 1 15:40:02 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 1 Jul 2013 15:40:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in andrewsmedina/numpypy/ndarray-ptp (pull request #157) Message-ID: <20130701134002.723EF1C010B@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r65140:1968b8b5d429 Date: 2013-07-01 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/1968b8b5d429/ Log: Merged in andrewsmedina/numpypy/ndarray-ptp (pull request #157) implemented ndarray put diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -184,6 +184,7 @@ appleveldefs = {} interpleveldefs = { 'choose': 'interp_arrayops.choose', + 'put': 'interp_arrayops.put', 'repeat': 'interp_arrayops.repeat', } submodules = { diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ 
-192,6 +192,61 @@ loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) return out + + at unwrap_spec(mode=str) +def put(space, w_arr, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy import constants + from pypy.module.micronumpy.support import int_w + + arr = convert_to_array(space, w_arr) + + if mode not in constants.MODES: + raise OperationError(space.w_ValueError, + space.wrap("mode %s not known" % (mode,))) + if not w_indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not w_values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + + dtype = arr.get_dtype() + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = int_w(space, idx) + + if index < 0 or index >= arr.get_size(): + if constants.MODES[mode] == constants.MODE_RAISE: + raise OperationError(space.w_ValueError, space.wrap( + "invalid entry in choice array")) + elif constants.MODES[mode] == constants.MODE_WRAP: + index = index % arr.get_size() + else: + assert constants.MODES[mode] == constants.MODE_CLIP + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) + + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -550,9 +550,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - def descr_put(self, space, w_indices, w_values, w_mode='raise'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "put not implemented yet")) + @unwrap_spec(mode=str) + def descr_put(self, space, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy.interp_arrayops import put + put(space, self, w_indices, w_values, mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -989,6 +990,7 @@ prod = interp2app(W_NDimArray.descr_prod), max = interp2app(W_NDimArray.descr_max), min = interp2app(W_NDimArray.descr_min), + put = interp2app(W_NDimArray.descr_put), argmax = interp2app(W_NDimArray.descr_argmax), argmin = interp2app(W_NDimArray.descr_argmin), all = interp2app(W_NDimArray.descr_all), diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -132,3 +132,26 @@ x = array([0, 0, 0], dtype='i2') r = array([2, 1, 0]).choose([a, b, c], out=x) assert r.dtype == 'i2' + + def test_put_basic(self): + from numpypy import arange, array + a = arange(5) + a.put([0, 2], [-44, -55]) + assert (a == array([-44, 1, -55, 3, 4])).all() + a = arange(5) + a.put([3, 4], 9) + assert (a == array([0, 1, 2, 9, 9])).all() + a = arange(5) + a.put(1, [7, 8]) + assert (a == array([0, 7, 2, 3, 4])).all() + + def test_put_modes(self): + from numpypy import array, arange + a = arange(5) + a.put(22, -5, mode='clip') + assert (a == array([0, 1, 2, 3, -5])).all() + a = 
arange(5) + a.put(22, -5, mode='wrap') + assert (a == array([0, 1, -5, 3, 4])).all() + raises(ValueError, "arange(5).put(22, -5, mode='raise')") + raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") From noreply at buildbot.pypy.org Mon Jul 1 16:18:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 16:18:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_enter_callback_call(), for possibly-recursive invocations Message-ID: <20130701141813.E8AA61C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r336:637f6c9d19f7 Date: 2013-07-01 16:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/637f6c9d19f7/ Log: stm_enter_callback_call(), for possibly-recursive invocations diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1538,15 +1538,14 @@ __thread gcptr stm_thread_local_obj; -int DescriptorInit(void) +void DescriptorInit(void) { if (GCFLAG_PREBUILT != PREBUILT_FLAGS) { stm_fatalerror("fix PREBUILT_FLAGS in stmgc.h by giving " "it the same value as GCFLAG_PREBUILT!\n"); } - - if (thread_descriptor == NULL) + else { revision_t i; struct tx_descriptor *d = stm_malloc(sizeof(struct tx_descriptor)); @@ -1594,16 +1593,14 @@ d->tx_next = stm_tx_head; if (d->tx_next != NULL) d->tx_next->tx_prev = d; stm_tx_head = d; + assert(thread_descriptor == NULL); thread_descriptor = d; dprintf(("[%lx] pthread %lx starting\n", (long)d->public_descriptor_index, (long)pthread_self())); stmgcpage_init_tls(); - return 1; } - else - return 0; } void DescriptorDone(void) diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -193,7 +193,7 @@ _Bool stm_has_got_any_lock(struct tx_descriptor *); struct tx_public_descriptor *stm_get_free_public_descriptor(revision_t *); -int DescriptorInit(void); +void DescriptorInit(void); void DescriptorDone(void); #endif /* _ET_H */ diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -48,6 +48,12 @@ void stm_initialize(void); void stm_finalize(void); +/* alternate initializers/deinitializers, to use for places that may or + may not be recursive, like callbacks from C code. The return value + of the first one must be passed as argument to the second. 
*/ +int stm_enter_callback_call(void); +void stm_leave_callback_call(int); + /* read/write barriers (the most general versions only for now) */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -77,29 +77,46 @@ d->max_aborts = max_aborts; } +int stm_enter_callback_call(void) +{ + int token = (thread_descriptor == NULL); + if (token == 1) { + stmgcpage_acquire_global_lock(); + DescriptorInit(); + stmgc_init_nursery(); + init_shadowstack(); + stmgcpage_release_global_lock(); + } + BeginInevitableTransaction(); + return token; +} + +void stm_leave_callback_call(int token) +{ + if (token == 1) + stmgc_minor_collect(); /* force everything out of the nursery */ + + CommitTransaction(); + + if (token == 1) { + stmgcpage_acquire_global_lock(); + done_shadowstack(); + stmgc_done_nursery(); + DescriptorDone(); + stmgcpage_release_global_lock(); + } +} + void stm_initialize(void) { - stmgcpage_acquire_global_lock(); - int r = DescriptorInit(); + int r = stm_enter_callback_call(); if (r != 1) - stm_fatalerror("stm_initialize: DescriptorInit failure\n"); - stmgc_init_nursery(); - init_shadowstack(); - //stmgcpage_init_tls(); - stmgcpage_release_global_lock(); - BeginInevitableTransaction(); + stm_fatalerror("stm_initialize: already initialized\n"); } void stm_finalize(void) { - stmgc_minor_collect(); /* force everything out of the nursery */ - CommitTransaction(); - stmgcpage_acquire_global_lock(); - //stmgcpage_done_tls(); - done_shadowstack(); - stmgc_done_nursery(); - DescriptorDone(); - stmgcpage_release_global_lock(); + stm_leave_callback_call(1); } /************************************************************/ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -63,6 +63,8 @@ void stm_set_transaction_length(long length_max); _Bool stm_should_break_transaction(void); long stm_atomic(long delta); + int stm_enter_callback_call(void); + void stm_leave_callback_call(int); /* extra non-public code */ void printfcolor(char *msg); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -630,7 +630,15 @@ assert lib.stm_hash(p) != lib.stm_id(p) run_parallel(f1, f2) - - - - +def test_enter_callback_call(): + lib.stm_commit_transaction() + x = lib.stm_enter_callback_call() + assert x == 0 + lib.stm_leave_callback_call(x) + lib.stm_begin_inevitable_transaction() + # + lib.stm_finalize() + x = lib.stm_enter_callback_call() + assert x == 1 + lib.stm_leave_callback_call(x) + lib.stm_initialize_tests(0) From noreply at buildbot.pypy.org Mon Jul 1 17:18:41 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 1 Jul 2013 17:18:41 +0200 (CEST) Subject: [pypy-commit] pypy default: add -fPIC flag Message-ID: <20130701151841.1CFA51C0168@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65141:2c5be52130b3 Date: 2013-07-01 17:17 +0200 http://bitbucket.org/pypy/pypy/changeset/2c5be52130b3/ Log: add -fPIC flag diff --git a/rpython/translator/platform/arm.py b/rpython/translator/platform/arm.py --- a/rpython/translator/platform/arm.py +++ b/rpython/translator/platform/arm.py @@ -17,6 +17,7 @@ class ARM(Linux): name = "arm" + shared_only = ('-fPIC',) available_librarydirs = [SB2 + '/lib/arm-linux-gnueabi/', SB2 + '/lib/arm-linux-gnueabihf/', From noreply at buildbot.pypy.org Mon Jul 1 17:21:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 17:21:06 +0200 (CEST) Subject: 
[pypy-commit] pypy stmgc-c4: Fix start_new_thread with stm. Message-ID: <20130701152106.8689F1C30E9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65142:f0f5f39180fd Date: 2013-07-01 17:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f0f5f39180fd/ Log: Fix start_new_thread with stm. diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -53,9 +53,10 @@ needs_finalizer=False, is_finalizer_light=False, contains_weakptr=False): - ll_assert(not needs_finalizer, 'XXX') - ll_assert(not is_finalizer_light, 'XXX') - ll_assert(not contains_weakptr, 'XXX') + # XXX finalizers are ignored for now + #ll_assert(not needs_finalizer, 'XXX needs_finalizer') + #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') + ll_assert(not contains_weakptr, 'XXX contains_weakptr') # XXX call optimized versions, e.g. if size < GC_NURSERY_SECTION return llop.stm_allocate(llmemory.GCREF, size, typeid16) diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -50,12 +50,7 @@ class StmRootWalker(BaseRootWalker): def need_thread_support(self, gctransformer, getfn): - def thread_start(): - llop.stm_initialize(lltype.Void) - def thread_die(): - llop.stm_finalize(lltype.Void) - self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None) - self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) + pass def walk_stack_roots(self, collect_stack_root): raise NotImplementedError diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -47,17 +47,21 @@ after_external_call._transaction_break_ = True def enter_callback_call(): - # XXX assumes that we're not called in a fresh new thread - llop.stm_begin_inevitable_transaction(lltype.Void) - return 0 + return llop.stm_enter_callback_call(lltype.Signed) enter_callback_call._dont_reach_me_in_del_ = True enter_callback_call._transaction_break_ = True -def leave_callback_call(ignored): - llop.stm_commit_transaction(lltype.Void) +def leave_callback_call(token): + llop.stm_leave_callback_call(lltype.Void, token) leave_callback_call._dont_reach_me_in_del_ = True leave_callback_call._transaction_break_ = True +def invoke_around_extcall(): + """Initialize the STM system. 
Must be called once from the start-up.""" + from rpython.rlib.objectmodel import invoke_around_extcall + invoke_around_extcall(before_external_call, after_external_call, + enter_callback_call, leave_callback_call) + # ____________________________________________________________ def make_perform_transaction(func, CONTAINERP): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -441,6 +441,8 @@ 'stm_change_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), 'stm_perform_transaction':LLOp(), + 'stm_enter_callback_call':LLOp(), + 'stm_leave_callback_call':LLOp(), 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -604,6 +604,8 @@ OP_STM_THREADLOCAL_GET = _OP_STM OP_STM_THREADLOCAL_SET = _OP_STM OP_STM_PERFORM_TRANSACTION = _OP_STM + OP_STM_ENTER_CALLBACK_CALL = _OP_STM + OP_STM_LEAVE_CALLBACK_CALL = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -151,6 +151,14 @@ arg1 = funcgen.expr(op.args[1]) return 'stm_perform_transaction((gcptr)%s, %s);' % (arg0, arg1) +def stm_enter_callback_call(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_enter_callback_call();' % (result,) + +def stm_leave_callback_call(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'stm_leave_callback_call(%s);' % (arg0,) + def op_stm(funcgen, op): func = globals()[op.opname] diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -1539,15 +1539,14 @@ __thread gcptr stm_thread_local_obj; -int DescriptorInit(void) +void DescriptorInit(void) { if (GCFLAG_PREBUILT != PREBUILT_FLAGS) { stm_fatalerror("fix PREBUILT_FLAGS in stmgc.h by giving " "it the same value as GCFLAG_PREBUILT!\n"); } - - if (thread_descriptor == NULL) + else { revision_t i; struct tx_descriptor *d = stm_malloc(sizeof(struct tx_descriptor)); @@ -1595,16 +1594,14 @@ d->tx_next = stm_tx_head; if (d->tx_next != NULL) d->tx_next->tx_prev = d; stm_tx_head = d; + assert(thread_descriptor == NULL); thread_descriptor = d; dprintf(("[%lx] pthread %lx starting\n", (long)d->public_descriptor_index, (long)pthread_self())); stmgcpage_init_tls(); - return 1; } - else - return 0; } void DescriptorDone(void) diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -194,7 +194,7 @@ _Bool stm_has_got_any_lock(struct tx_descriptor *); struct tx_public_descriptor *stm_get_free_public_descriptor(revision_t *); -int DescriptorInit(void); +void DescriptorInit(void); void DescriptorDone(void); #endif /* _ET_H */ diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -bf56c12295c8 +637f6c9d19f7 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -49,6 
+49,12 @@ void stm_initialize(void); void stm_finalize(void); +/* alternate initializers/deinitializers, to use for places that may or + may not be recursive, like callbacks from C code. The return value + of the first one must be passed as argument to the second. */ +int stm_enter_callback_call(void); +void stm_leave_callback_call(int); + /* read/write barriers (the most general versions only for now) */ #if 0 // (optimized version below) gcptr stm_read_barrier(gcptr); diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -78,29 +78,46 @@ d->max_aborts = max_aborts; } +int stm_enter_callback_call(void) +{ + int token = (thread_descriptor == NULL); + if (token == 1) { + stmgcpage_acquire_global_lock(); + DescriptorInit(); + stmgc_init_nursery(); + init_shadowstack(); + stmgcpage_release_global_lock(); + } + BeginInevitableTransaction(); + return token; +} + +void stm_leave_callback_call(int token) +{ + if (token == 1) + stmgc_minor_collect(); /* force everything out of the nursery */ + + CommitTransaction(); + + if (token == 1) { + stmgcpage_acquire_global_lock(); + done_shadowstack(); + stmgc_done_nursery(); + DescriptorDone(); + stmgcpage_release_global_lock(); + } +} + void stm_initialize(void) { - stmgcpage_acquire_global_lock(); - int r = DescriptorInit(); + int r = stm_enter_callback_call(); if (r != 1) - stm_fatalerror("stm_initialize: DescriptorInit failure\n"); - stmgc_init_nursery(); - init_shadowstack(); - //stmgcpage_init_tls(); - stmgcpage_release_global_lock(); - BeginInevitableTransaction(); + stm_fatalerror("stm_initialize: already initialized\n"); } void stm_finalize(void) { - stmgc_minor_collect(); /* force everything out of the nursery */ - CommitTransaction(); - stmgcpage_acquire_global_lock(); - //stmgcpage_done_tls(); - done_shadowstack(); - stmgc_done_nursery(); - DescriptorDone(); - stmgcpage_release_global_lock(); + stm_leave_callback_call(1); } /************************************************************/ diff --git a/rpython/translator/stm/test/targetdemo2.py b/rpython/translator/stm/test/targetdemo2.py --- a/rpython/translator/stm/test/targetdemo2.py +++ b/rpython/translator/stm/test/targetdemo2.py @@ -1,7 +1,7 @@ import time from rpython.rlib import rthread from rpython.rlib import rstm, jit -from rpython.rlib.objectmodel import invoke_around_extcall, we_are_translated +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.objectmodel import compute_identity_hash from rpython.rlib.debug import ll_assert from rpython.rtyper.lltypesystem import lltype, rffi, rclass @@ -250,8 +250,7 @@ def setup_threads(): #space.threadlocals.setup_threads(space) bootstrapper.setup() - invoke_around_extcall(rstm.before_external_call, rstm.after_external_call, - rstm.enter_callback_call, rstm.leave_callback_call) + rstm.invoke_around_extcall() def start_thread(args): bootstrapper.acquire(args) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -62,12 +62,11 @@ glob = Global() # def threadfn(): - rthread.gc_thread_start() x = Global() x.value = 0 glob.seen = x - rthread.gc_thread_die() def entry_point(argv): + rstm.invoke_around_extcall() glob.seen = None rthread.start_new_thread(threadfn, ()) while glob.seen is None: From noreply at buildbot.pypy.org Mon 
Jul 1 17:21:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 17:21:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Call invoke_around_extcall() automatically with stm. Message-ID: <20130701152107.ABA791C30E9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65143:e68bd4bac940 Date: 2013-07-01 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/e68bd4bac940/ Log: Call invoke_around_extcall() automatically with stm. diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -56,8 +56,9 @@ leave_callback_call._dont_reach_me_in_del_ = True leave_callback_call._transaction_break_ = True -def invoke_around_extcall(): - """Initialize the STM system. Must be called once from the start-up.""" +def register_invoke_around_extcall(): + """Initialize the STM system. + Called automatically by rthread.start_new_thread().""" from rpython.rlib.objectmodel import invoke_around_extcall invoke_around_extcall(before_external_call, after_external_call, enter_callback_call, leave_callback_call) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -97,6 +97,9 @@ @specialize.arg(0) def ll_start_new_thread(func): + if rgc.stm_is_enabled: + from rpython.rlib.rstm import register_invoke_around_extcall + register_invoke_around_extcall() ident = c_thread_start(func) if ident == -1: raise error("can't start new thread") diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -331,13 +331,8 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator entrypoint = self.entrypoint - stm_nogc = (self.config.translation.stm and - self.config.translation.gc == "none") # def entrypoint_wrapper(argc, argv): - if stm_nogc: - from rpython.translator.stm.funcgen import _stm_nogc_init_function - _stm_nogc_init_function() list = [""] * argc i = 0 while i < argc: diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -66,7 +66,6 @@ x.value = 0 glob.seen = x def entry_point(argv): - rstm.invoke_around_extcall() glob.seen = None rthread.start_new_thread(threadfn, ()) while glob.seen is None: From noreply at buildbot.pypy.org Mon Jul 1 17:35:01 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 17:35:01 +0200 (CEST) Subject: [pypy-commit] pypy inline-identityhash: A branch to experiment with inlining identityhash Message-ID: <20130701153501.526E21C32B8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: inline-identityhash Changeset: r65144:bbf95dc4677f Date: 2013-07-01 17:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bbf95dc4677f/ Log: A branch to experiment with inlining identityhash diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -57,8 +57,8 @@ from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint from rpython.rlib.rarithmetic import LONG_BIT_SHIFT from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop -from rpython.rlib.objectmodel import we_are_translated -from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib.objectmodel import specialize + # # 
Handles the objects in 2 generations: @@ -1824,6 +1824,48 @@ # ---------- # id() and identityhash() support + def _allocate_shadow(self, obj): + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) + # Initialize the shadow enough to be considered a + # valid gc object. If the original object stays + # alive at the next minor collection, it will anyway + # be copied over the shadow and overwrite the + # following fields. But if the object dies, then + # the shadow will stay around and only be freed at + # the next major collection, at which point we want + # it to look valid (but ready to be freed). + shadow = shadowhdr + size_gc_header + self.header(shadow).tid = self.header(obj).tid + typeid = self.get_type_id(obj) + if self.is_varsize(typeid): + lenofs = self.varsize_offset_to_length(typeid) + (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] + # + self.header(obj).tid |= GCFLAG_HAS_SHADOW + self.nursery_objects_shadows.setitem(obj, shadow) + return shadow + + def _find_shadow(self, obj): + # + # The object is not a tagged pointer, and it is still in the + # nursery. Find or allocate a "shadow" object, which is + # where the object will be moved by the next minor + # collection + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + shadow = self.nursery_objects_shadows.get(obj) + ll_assert(shadow != NULL, + "GCFLAG_HAS_SHADOW but no shadow found") + else: + shadow = self._allocate_shadow(obj) + # + # The answer is the address of the shadow. + return shadow + _find_shadow._dont_inline_ = True + + @specialize.arg(2) def id_or_identityhash(self, gcobj, is_hash): """Implement the common logic of id() and identityhash() of an object, given as a GCREF. @@ -1832,41 +1874,7 @@ # if self.is_valid_gc_object(obj): if self.is_in_nursery(obj): - # - # The object is not a tagged pointer, and it is still in the - # nursery. Find or allocate a "shadow" object, which is - # where the object will be moved by the next minor - # collection - if self.header(obj).tid & GCFLAG_HAS_SHADOW: - shadow = self.nursery_objects_shadows.get(obj) - ll_assert(shadow != NULL, - "GCFLAG_HAS_SHADOW but no shadow found") - else: - size_gc_header = self.gcheaderbuilder.size_gc_header - size = self.get_size(obj) - shadowhdr = self._malloc_out_of_nursery(size_gc_header + - size) - # Initialize the shadow enough to be considered a - # valid gc object. If the original object stays - # alive at the next minor collection, it will anyway - # be copied over the shadow and overwrite the - # following fields. But if the object dies, then - # the shadow will stay around and only be freed at - # the next major collection, at which point we want - # it to look valid (but ready to be freed). - shadow = shadowhdr + size_gc_header - self.header(shadow).tid = self.header(obj).tid - typeid = self.get_type_id(obj) - if self.is_varsize(typeid): - lenofs = self.varsize_offset_to_length(typeid) - (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] - # - self.header(obj).tid |= GCFLAG_HAS_SHADOW - self.nursery_objects_shadows.setitem(obj, shadow) - # - # The answer is the address of the shadow. 
- obj = shadow - # + obj = self._find_shadow(obj) elif is_hash: if self.header(obj).tid & GCFLAG_HAS_SHADOW: # diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -424,7 +424,7 @@ self.identityhash_ptr = getfn(GCClass.identityhash.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - minimal_transform=False) + minimal_transform=False, inline=True) if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], @@ -433,7 +433,7 @@ if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - inline = False, + inline = True, minimal_transform = False) else: self.id_ptr = None From noreply at buildbot.pypy.org Mon Jul 1 17:42:27 2013 From: noreply at buildbot.pypy.org (untitaker) Date: Mon, 1 Jul 2013 17:42:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix link in documentation Message-ID: <20130701154227.A7FBB1C010B@cobra.cs.uni-duesseldorf.de> Author: Markus Unterwaditzer Branch: Changeset: r65145:ccc1d50f9af9 Date: 2013-07-01 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/ccc1d50f9af9/ Log: Fix link in documentation diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ From noreply at buildbot.pypy.org Mon Jul 1 17:42:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 1 Jul 2013 17:42:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in untitaker/pypy (pull request #158) Message-ID: <20130701154228.F10531C010B@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r65146:4023fff525c0 Date: 2013-07-01 17:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4023fff525c0/ Log: Merged in untitaker/pypy (pull request #158) Fix link in documentation diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. 
_`pytest`: http://pytest.org/ From noreply at buildbot.pypy.org Mon Jul 1 19:03:56 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 19:03:56 +0200 (CEST) Subject: [pypy-commit] pypy inline-identityhash: always inline that guy Message-ID: <20130701170356.F021F1C3022@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: inline-identityhash Changeset: r65147:e5c447a98e83 Date: 2013-07-01 19:03 +0200 http://bitbucket.org/pypy/pypy/changeset/e5c447a98e83/ Log: always inline that guy diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1892,6 +1892,7 @@ if is_hash: i = mangle_hash(i) return i + id_or_identityhash._always_inline_ = True def id(self, gcobj): return self.id_or_identityhash(gcobj, False) From noreply at buildbot.pypy.org Mon Jul 1 19:53:36 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 19:53:36 +0200 (CEST) Subject: [pypy-commit] pypy inline-identityhash: close to-be-merged branch Message-ID: <20130701175336.A45851C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: inline-identityhash Changeset: r65148:0c75c6e3b735 Date: 2013-07-01 19:47 +0200 http://bitbucket.org/pypy/pypy/changeset/0c75c6e3b735/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Mon Jul 1 19:53:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 19:53:38 +0200 (CEST) Subject: [pypy-commit] pypy default: merge inline-identityhash - it gives a measurable speedup (~25%) on Message-ID: <20130701175338.18DEA1C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65149:f75c60186757 Date: 2013-07-01 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/f75c60186757/ Log: merge inline-identityhash - it gives a measurable speedup (~25%) on microbenchmarks, like set comparison. Seems to have no real downsides diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -57,8 +57,8 @@ from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint from rpython.rlib.rarithmetic import LONG_BIT_SHIFT from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop -from rpython.rlib.objectmodel import we_are_translated -from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib.objectmodel import specialize + # # Handles the objects in 2 generations: @@ -1824,6 +1824,48 @@ # ---------- # id() and identityhash() support + def _allocate_shadow(self, obj): + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) + # Initialize the shadow enough to be considered a + # valid gc object. If the original object stays + # alive at the next minor collection, it will anyway + # be copied over the shadow and overwrite the + # following fields. But if the object dies, then + # the shadow will stay around and only be freed at + # the next major collection, at which point we want + # it to look valid (but ready to be freed). 
+ shadow = shadowhdr + size_gc_header + self.header(shadow).tid = self.header(obj).tid + typeid = self.get_type_id(obj) + if self.is_varsize(typeid): + lenofs = self.varsize_offset_to_length(typeid) + (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] + # + self.header(obj).tid |= GCFLAG_HAS_SHADOW + self.nursery_objects_shadows.setitem(obj, shadow) + return shadow + + def _find_shadow(self, obj): + # + # The object is not a tagged pointer, and it is still in the + # nursery. Find or allocate a "shadow" object, which is + # where the object will be moved by the next minor + # collection + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + shadow = self.nursery_objects_shadows.get(obj) + ll_assert(shadow != NULL, + "GCFLAG_HAS_SHADOW but no shadow found") + else: + shadow = self._allocate_shadow(obj) + # + # The answer is the address of the shadow. + return shadow + _find_shadow._dont_inline_ = True + + @specialize.arg(2) def id_or_identityhash(self, gcobj, is_hash): """Implement the common logic of id() and identityhash() of an object, given as a GCREF. @@ -1832,41 +1874,7 @@ # if self.is_valid_gc_object(obj): if self.is_in_nursery(obj): - # - # The object is not a tagged pointer, and it is still in the - # nursery. Find or allocate a "shadow" object, which is - # where the object will be moved by the next minor - # collection - if self.header(obj).tid & GCFLAG_HAS_SHADOW: - shadow = self.nursery_objects_shadows.get(obj) - ll_assert(shadow != NULL, - "GCFLAG_HAS_SHADOW but no shadow found") - else: - size_gc_header = self.gcheaderbuilder.size_gc_header - size = self.get_size(obj) - shadowhdr = self._malloc_out_of_nursery(size_gc_header + - size) - # Initialize the shadow enough to be considered a - # valid gc object. If the original object stays - # alive at the next minor collection, it will anyway - # be copied over the shadow and overwrite the - # following fields. But if the object dies, then - # the shadow will stay around and only be freed at - # the next major collection, at which point we want - # it to look valid (but ready to be freed). - shadow = shadowhdr + size_gc_header - self.header(shadow).tid = self.header(obj).tid - typeid = self.get_type_id(obj) - if self.is_varsize(typeid): - lenofs = self.varsize_offset_to_length(typeid) - (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] - # - self.header(obj).tid |= GCFLAG_HAS_SHADOW - self.nursery_objects_shadows.setitem(obj, shadow) - # - # The answer is the address of the shadow. 
- obj = shadow - # + obj = self._find_shadow(obj) elif is_hash: if self.header(obj).tid & GCFLAG_HAS_SHADOW: # @@ -1884,6 +1892,7 @@ if is_hash: i = mangle_hash(i) return i + id_or_identityhash._always_inline_ = True def id(self, gcobj): return self.id_or_identityhash(gcobj, False) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -424,7 +424,7 @@ self.identityhash_ptr = getfn(GCClass.identityhash.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - minimal_transform=False) + minimal_transform=False, inline=True) if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], @@ -433,7 +433,7 @@ if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - inline = False, + inline = True, minimal_transform = False) else: self.id_ptr = None From noreply at buildbot.pypy.org Mon Jul 1 19:53:39 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 19:53:39 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130701175339.621881C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65150:ecacece76689 Date: 2013-07-01 19:53 +0200 http://bitbucket.org/pypy/pypy/changeset/ecacece76689/ Log: merge diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -184,6 +184,7 @@ appleveldefs = {} interpleveldefs = { 'choose': 'interp_arrayops.choose', + 'put': 'interp_arrayops.put', 'repeat': 'interp_arrayops.repeat', } submodules = { diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -192,6 +192,61 @@ loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) return out + + at unwrap_spec(mode=str) +def put(space, w_arr, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy import constants + from pypy.module.micronumpy.support import int_w + + arr = convert_to_array(space, w_arr) + + if mode not in constants.MODES: + raise OperationError(space.w_ValueError, + space.wrap("mode %s not known" % (mode,))) + if not w_indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not w_values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + + dtype = arr.get_dtype() + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = int_w(space, idx) + + if index < 0 or index >= arr.get_size(): + if constants.MODES[mode] == constants.MODE_RAISE: + raise OperationError(space.w_ValueError, space.wrap( + "invalid entry in choice array")) + elif constants.MODES[mode] == constants.MODE_WRAP: + index = index % arr.get_size() + else: + assert constants.MODES[mode] 
== constants.MODE_CLIP + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) + + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -550,9 +550,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - def descr_put(self, space, w_indices, w_values, w_mode='raise'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "put not implemented yet")) + @unwrap_spec(mode=str) + def descr_put(self, space, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy.interp_arrayops import put + put(space, self, w_indices, w_values, mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -989,6 +990,7 @@ prod = interp2app(W_NDimArray.descr_prod), max = interp2app(W_NDimArray.descr_max), min = interp2app(W_NDimArray.descr_min), + put = interp2app(W_NDimArray.descr_put), argmax = interp2app(W_NDimArray.descr_argmax), argmin = interp2app(W_NDimArray.descr_argmin), all = interp2app(W_NDimArray.descr_all), diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -132,3 +132,26 @@ x = array([0, 0, 0], dtype='i2') r = array([2, 1, 0]).choose([a, b, c], out=x) assert r.dtype == 'i2' + + def test_put_basic(self): + from numpypy import arange, array + a = arange(5) + a.put([0, 2], [-44, -55]) + assert (a == array([-44, 1, -55, 3, 4])).all() + a = arange(5) + a.put([3, 4], 9) + assert (a == array([0, 1, 2, 9, 9])).all() + a = arange(5) + a.put(1, [7, 8]) + assert (a == array([0, 7, 2, 3, 4])).all() + + def test_put_modes(self): + from numpypy import array, arange + a = arange(5) + a.put(22, -5, mode='clip') + assert (a == array([0, 1, 2, 3, -5])).all() + a = arange(5) + a.put(22, -5, mode='wrap') + assert (a == array([0, 1, -5, 3, 4])).all() + raises(ValueError, "arange(5).put(22, -5, mode='raise')") + raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") diff --git a/rpython/translator/platform/arm.py b/rpython/translator/platform/arm.py --- a/rpython/translator/platform/arm.py +++ b/rpython/translator/platform/arm.py @@ -17,6 +17,7 @@ class ARM(Linux): name = "arm" + shared_only = ('-fPIC',) available_librarydirs = [SB2 + '/lib/arm-linux-gnueabi/', SB2 + '/lib/arm-linux-gnueabihf/', From noreply at buildbot.pypy.org Mon Jul 1 19:56:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 1 Jul 2013 19:56:17 +0200 (CEST) Subject: [pypy-commit] pypy default: document the branch Message-ID: <20130701175617.478571C0168@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65151:a4b1d73be06f Date: 2013-07-01 19:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a4b1d73be06f/ Log: document the branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -67,3 +67,6 @@ .. branch: identity-set Faster sets for objects + +.. 
branch: inline-identityhash +Inline the fast path of id() and hash() From noreply at buildbot.pypy.org Mon Jul 1 20:30:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 1 Jul 2013 20:30:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20130701183045.E0FE51C010B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65152:6be53cd7a876 Date: 2013-07-01 11:30 -0700 http://bitbucket.org/pypy/pypy/changeset/6be53cd7a876/ Log: 2to3 diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -25,8 +25,7 @@ ('gr_mem', POINTER(c_char_p)), ) -class struct_group: - __metaclass__ = _structseq.structseqtype +class struct_group(metaclass=_structseq.structseqtype): gr_name = _structseq.structseqfield(0) gr_passwd = _structseq.structseqfield(1) From noreply at buildbot.pypy.org Mon Jul 1 23:01:12 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 1 Jul 2013 23:01:12 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-segfault: a test that segfaults Message-ID: <20130701210112.5ADDE1C301A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-segfault Changeset: r65153:f669a32fe79d Date: 2013-06-30 22:56 +0300 http://bitbucket.org/pypy/pypy/changeset/f669a32fe79d/ Log: a test that segfaults diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -293,6 +293,17 @@ b = array(a, copy=False, ndmin=4) b[0,0,0,0] = 0 assert a[0, 0] == 0 + a = array([[[]]]) + # Simulate tiling an empty array, really tests repeat, reshape + # b = tile(a, (3, 2, 5)) + reps = (3, 4, 5) + c = array(a, copy=False, subok=True, ndmin=len(reps)) + print '1',c,c.shape + d = c.reshape(3, 4, 0) + print '2',c,c.shape + e = d.repeat(3, 0) + print '3',c,c.shape,e.shape + assert e.shape == (9, 4, 0) def test_type(self): from numpypy import array From noreply at buildbot.pypy.org Mon Jul 1 23:01:13 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 1 Jul 2013 23:01:13 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-segfault: do not iter over empty arrays Message-ID: <20130701210113.C76E51C301A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-segfault Changeset: r65154:863f80543401 Date: 2013-07-01 17:44 +0300 http://bitbucket.org/pypy/pypy/changeset/863f80543401/ Log: do not iter over empty arrays diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -65,7 +65,7 @@ [ 3., 4., -1.], [-1., -1., -1.]]) - + NOTE: support for not passing x and y is unsupported """ if space.is_none(w_y): @@ -122,10 +122,10 @@ for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("record type mismatch")) elif dtype.is_record_type() or a_dt.is_record_type(): - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from 
pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy.support import product from rpython.rlib import jit # structures to describe slicing @@ -225,7 +226,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = False + self._done = self.shapelen == 0 or product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size From noreply at buildbot.pypy.org Mon Jul 1 23:01:15 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 1 Jul 2013 23:01:15 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-segfault: test, fix creating empty axis iterator Message-ID: <20130701210115.536981C301A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-segfault Changeset: r65155:df376dab84d2 Date: 2013-07-02 00:00 +0300 http://bitbucket.org/pypy/pypy/changeset/df376dab84d2/ Log: test, fix creating empty axis iterator diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -285,7 +285,7 @@ self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] self.first_line = True self.indices = [0] * len(shape) - self._done = False + self._done = array.get_size() == 0 self.offset = array.start self.dim = dim self.array = array diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -298,11 +298,8 @@ # b = tile(a, (3, 2, 5)) reps = (3, 4, 5) c = array(a, copy=False, subok=True, ndmin=len(reps)) - print '1',c,c.shape d = c.reshape(3, 4, 0) - print '2',c,c.shape e = d.repeat(3, 0) - print '3',c,c.shape,e.shape assert e.shape == (9, 4, 0) def test_type(self): @@ -2573,6 +2570,9 @@ a = array(range(100) + range(100) + range(100)) b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + a = array([[[]]]).reshape(3,4,0) + b = a.argsort() + assert b.size == 0 def test_argsort_random(self): from numpypy import array From noreply at buildbot.pypy.org Mon Jul 1 23:39:11 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 1 Jul 2013 23:39:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Fix tests: in the py3k branch, we parse unicode strings Message-ID: <20130701213911.C43AB1C010B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r65156:18f3a937ce30 Date: 2013-07-01 23:38 +0200 http://bitbucket.org/pypy/pypy/changeset/18f3a937ce30/ Log: Fix tests: in the py3k branch, we parse unicode strings and 'L' suffix is not allowed. 
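Condensed, the expectation these test updates encode is roughly the following;
the sketch uses only names and values that appear verbatim in the diff below
(rbigint.fromstr and ParseStringError), so it illustrates the tests rather
than any wider API:

    from rpython.rlib.rbigint import rbigint
    from rpython.rlib.rstring import ParseStringError

    assert rbigint.fromstr(u'123').tolong() == 123      # unicode input is accepted
    assert rbigint.fromstr(u'123', 4).tolong() == 27    # explicit base 4
    try:
        rbigint.fromstr(u'123L')                        # trailing 'L' is now rejected
    except ParseStringError:
        pass                                            # expected failure

The same shift to unicode inputs is applied to the string_to_int and
string_to_float tests in the other two files touched by this changeset.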
diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py --- a/rpython/rlib/test/test_rarithmetic.py +++ b/rpython/rlib/test/test_rarithmetic.py @@ -418,49 +418,49 @@ class TestStringToInt: def test_string_to_int(self): - cases = [('0', 0), - ('1', 1), - ('9', 9), - ('10', 10), - ('09', 9), - ('0000101', 101), # not octal unless base 0 or 8 - ('5123', 5123), - (' 0', 0), - ('0 ', 0), - (' \t \n 32313 \f \v \r \n\r ', 32313), - ('+12', 12), - ('-5', -5), - ('- 5', -5), - ('+ 5', 5), - (' -123456789 ', -123456789), + cases = [(u'0', 0), + (u'1', 1), + (u'9', 9), + (u'10', 10), + (u'09', 9), + (u'0000101', 101), # not octal unless base 0 or 8 + (u'5123', 5123), + (u' 0', 0), + (u'0 ', 0), + (u' \t \n 32313 \f \v \r \n\r ', 32313), + (u'+12', 12), + (u'-5', -5), + (u'- 5', -5), + (u'+ 5', 5), + (u' -123456789 ', -123456789), ] for s, expected in cases: assert string_to_int(s) == expected #assert string_to_bigint(s).tolong() == expected def test_string_to_int_base(self): - cases = [('111', 2, 7), - ('010', 2, 2), - ('102', 3, 11), - ('103', 4, 19), - ('107', 8, 71), - ('109', 10, 109), - ('10A', 11, 131), - ('10a', 11, 131), - ('10f', 16, 271), - ('10F', 16, 271), - ('0x10f', 16, 271), - ('0x10F', 16, 271), - ('10z', 36, 1331), - ('10Z', 36, 1331), - ('12', 0, 12), - ('015', 0, 13), - ('0x10', 0, 16), - ('0XE', 0, 14), - ('0', 0, 0), - ('0b11', 2, 3), - ('0B10', 2, 2), - ('0o77', 8, 63), + cases = [(u'111', 2, 7), + (u'010', 2, 2), + (u'102', 3, 11), + (u'103', 4, 19), + (u'107', 8, 71), + (u'109', 10, 109), + (u'10A', 11, 131), + (u'10a', 11, 131), + (u'10f', 16, 271), + (u'10F', 16, 271), + (u'0x10f', 16, 271), + (u'0x10F', 16, 271), + (u'10z', 36, 1331), + (u'10Z', 36, 1331), + (u'12', 0, 12), + (u'015', 0, 13), + (u'0x10', 0, 16), + (u'0XE', 0, 14), + (u'0', 0, 0), + (u'0b11', 2, 3), + (u'0B10', 2, 2), + (u'0o77', 8, 63), ] for s, base, expected in cases: assert string_to_int(s, base) == expected @@ -471,21 +471,21 @@ assert string_to_int('-'+s+' ', base) == -expected def test_string_to_int_error(self): - cases = ['0x123', # must use base 0 or 16 - ' 0X12 ', - '0b01', - '0o01', - '', - '++12', - '+-12', - '-+12', - '--12', - '12a6', - '12A6', - 'f', - 'Z', - '.', - '@', + cases = [u'0x123', # must use base 0 or 16 + u' 0X12 ', + u'0b01', + u'0o01', + u'', + u'++12', + u'+-12', + u'-+12', + u'--12', + u'12a6', + u'12A6', + u'f', + u'Z', + u'.', + u'@', ] for s in cases: py.test.raises(ParseStringError, string_to_int, s) @@ -493,39 +493,39 @@ py.test.raises(ParseStringError, string_to_int, s+' ') py.test.raises(ParseStringError, string_to_int, '+'+s) py.test.raises(ParseStringError, string_to_int, '-'+s) - py.test.raises(ParseStringError, string_to_int, '0x', 16) - py.test.raises(ParseStringError, string_to_int, '-0x', 16) + py.test.raises(ParseStringError, string_to_int, u'0x', 16) + py.test.raises(ParseStringError, string_to_int, u'-0x', 16) - exc = py.test.raises(ParseStringError, string_to_int, '') + exc = py.test.raises(ParseStringError, string_to_int, u'') assert exc.value.msg == "invalid literal for int() with base 10: ''" - exc = py.test.raises(ParseStringError, string_to_int, '', 0) + exc = py.test.raises(ParseStringError, string_to_int, u'', 0) assert exc.value.msg == "invalid literal for int() with base 0: ''" def test_string_to_int_overflow(self): import sys py.test.raises(ParseStringOverflowError, string_to_int, - str(sys.maxint*17)) + unicode(sys.maxint*17)) def test_string_to_int_not_overflow(self): import sys for x in [-sys.maxint-1, 
sys.maxint]: - y = string_to_int(str(x)) + y = string_to_int(unicode(x)) assert y == x def test_string_to_int_base_error(self): - cases = [('1', 1), - ('1', 37), - ('a', 0), - ('9', 9), - ('0x123', 7), - ('145cdf', 15), - ('12', 37), - ('12', 98172), - ('12', -1), - ('12', -908), - ('12.3', 10), - ('12.3', 13), - ('12.3', 16), + cases = [(u'1', 1), + (u'1', 37), + (u'a', 0), + (u'9', 9), + (u'0x123', 7), + (u'145cdf', 15), + (u'12', 37), + (u'12', 98172), + (u'12', -1), + (u'12', -908), + (u'12.3', 10), + (u'12.3', 13), + (u'12.3', 16), ] for s, base in cases: py.test.raises(ParseStringError, string_to_int, s, base) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -212,19 +212,21 @@ def test_fromstr(self): from rpython.rlib.rstring import ParseStringError - assert rbigint.fromstr('123L').tolong() == 123 - assert rbigint.fromstr('123L ').tolong() == 123 - py.test.raises(ParseStringError, rbigint.fromstr, 'L') - py.test.raises(ParseStringError, rbigint.fromstr, 'L ') - assert rbigint.fromstr('123L', 4).tolong() == 27 - assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 - assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 - assert rbigint.fromstr('123L', 21).tolong() == 441 + 42 + 3 - assert rbigint.fromstr('1891234174197319').tolong() == 1891234174197319 + assert rbigint.fromstr(u'123').tolong() == 123 + assert rbigint.fromstr(u'123 ').tolong() == 123 + py.test.raises(ParseStringError, rbigint.fromstr, u'123L') + py.test.raises(ParseStringError, rbigint.fromstr, u'123L ') + py.test.raises(ParseStringError, rbigint.fromstr, u'L') + py.test.raises(ParseStringError, rbigint.fromstr, u'L ') + assert rbigint.fromstr(u'123', 4).tolong() == 27 + assert rbigint.fromstr(u'123L', 30).tolong() == 27000 + 1800 + 90 + 21 + assert rbigint.fromstr(u'123L', 22).tolong() == 10648 + 968 + 66 + 21 + py.test.raises(ParseStringError, rbigint.fromstr, u'123L', 21) + assert rbigint.fromstr(u'1891234174197319').tolong() == 1891234174197319 def test_from_numberstring_parser(self): from rpython.rlib.rstring import NumberStringParser - parser = NumberStringParser("1231231241", "1231231241", 10, "long") + parser = NumberStringParser(u"1231231241", u"1231231241", 10, u"long") assert rbigint._from_numberstring_parser(parser).tolong() == 1231231241 def test_add(self): diff --git a/rpython/rlib/test/test_rfloat.py b/rpython/rlib/test/test_rfloat.py --- a/rpython/rlib/test/test_rfloat.py +++ b/rpython/rlib/test/test_rfloat.py @@ -239,19 +239,19 @@ def test_string_to_float(): from rpython.rlib.rstring import ParseStringError import random - assert string_to_float('0') == 0.0 - assert string_to_float('1') == 1.0 - assert string_to_float('-1.5') == -1.5 - assert string_to_float('1.5E2') == 150.0 - assert string_to_float('2.5E-1') == 0.25 - assert string_to_float('1e1111111111111') == float('1e1111111111111') - assert string_to_float('1e-1111111111111') == float('1e-1111111111111') - assert string_to_float('-1e1111111111111') == float('-1e1111111111111') - assert string_to_float('-1e-1111111111111') == float('-1e-1111111111111') - assert string_to_float('1e111111111111111111111') == float('1e111111111111111111111') - assert string_to_float('1e-111111111111111111111') == float('1e-111111111111111111111') - assert string_to_float('-1e111111111111111111111') == float('-1e111111111111111111111') - assert string_to_float('-1e-111111111111111111111') == 
float('-1e-111111111111111111111') + assert string_to_float(u'0') == 0.0 + assert string_to_float(u'1') == 1.0 + assert string_to_float(u'-1.5') == -1.5 + assert string_to_float(u'1.5E2') == 150.0 + assert string_to_float(u'2.5E-1') == 0.25 + assert string_to_float(u'1e1111111111111') == float('1e1111111111111') + assert string_to_float(u'1e-1111111111111') == float('1e-1111111111111') + assert string_to_float(u'-1e1111111111111') == float('-1e1111111111111') + assert string_to_float(u'-1e-1111111111111') == float('-1e-1111111111111') + assert string_to_float(u'1e111111111111111111111') == float('1e111111111111111111111') + assert string_to_float(u'1e-111111111111111111111') == float('1e-111111111111111111111') + assert string_to_float(u'-1e111111111111111111111') == float('-1e111111111111111111111') + assert string_to_float(u'-1e-111111111111111111111') == float('-1e-111111111111111111111') valid_parts = [['', ' ', ' \f\n\r\t\v'], ['', '+', '-'], @@ -273,7 +273,7 @@ for part2 in valid_parts[2]: for part3 in valid_parts[3]: for part4 in valid_parts[4]: - s = part0+part1+part2+part3+part4 + s = unicode(part0+part1+part2+part3+part4) assert (abs(string_to_float(s) - float(s)) <= 1E-13 * abs(float(s))) @@ -282,8 +282,8 @@ for i in range(20): parts = [random.choice(lst) for lst in valid_parts] parts[j] = invalid - s = ''.join(parts) + s = u''.join(parts) print repr(s) if s.strip(): # empty s raises OperationError directly py.test.raises(ParseStringError, string_to_float, s) - py.test.raises(ParseStringError, string_to_float, "") + py.test.raises(ParseStringError, string_to_float, u"") From noreply at buildbot.pypy.org Tue Jul 2 01:19:18 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 2 Jul 2013 01:19:18 +0200 (CEST) Subject: [pypy-commit] pypy py3k: we don't actually utf8 encode here, so lighten the expectation to a ValueError Message-ID: <20130701231918.D5DDE1C010B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65157:ce22f20c0ada Date: 2013-07-01 16:18 -0700 http://bitbucket.org/pypy/pypy/changeset/ce22f20c0ada/ Log: we don't actually utf8 encode here, so lighten the expectation to a ValueError diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -446,7 +446,7 @@ def test_from_string(self): raises(ValueError, float, "\0") - raises(UnicodeEncodeError, float, '\uD8F0') + raises(ValueError, float, '\uD8F0') def test_format(self): f = 1.1234e200 From noreply at buildbot.pypy.org Tue Jul 2 01:35:09 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 2 Jul 2013 01:35:09 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k compat Message-ID: <20130701233509.1E1D31C0168@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65158:7a85cc22a4c9 Date: 2013-07-01 16:28 -0700 http://bitbucket.org/pypy/pypy/changeset/7a85cc22a4c9/ Log: py3k compat diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -132,7 +132,8 @@ _tls.main = gmain _tls.current = gmain -def _greenlet_start(greenlet, (args, kwds)): +def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: res = greenlet.run(*args, **kwds) From noreply at buildbot.pypy.org Tue Jul 2 01:35:10 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 2 Jul 2013 01:35:10 +0200 (CEST) Subject: [pypy-commit] pypy default: whitespace Message-ID: 
<20130701233510.8FAB41C0168@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65159:2ee41b828b47 Date: 2013-07-01 16:32 -0700 http://bitbucket.org/pypy/pypy/changeset/2ee41b828b47/ Log: whitespace diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -1,23 +1,22 @@ import py +class AppTestIdentitySet(object): -class AppTestIdentitySet(object): - - #needed for compares_by_identity + # needed for compares_by_identity spaceconfig = {"objspace.std.withidentitydict": True} - + def setup_class(cls): from pypy.objspace.std import identitydict if cls.runappdirect: py.test.skip("interp2app doesn't work on appdirect") - + def w_uses_strategy(self, s , obj): import __pypy__ return s in __pypy__.internal_repr(obj) - + def test_use_identity_strategy(self): - + class Plain(object): pass @@ -32,113 +31,113 @@ class CustomHash(object): def __hash__(self): return 0 - + s = set() - + assert not self.uses_strategy('IdentitySetStrategy',s) - + s.add(Plain()) - + assert self.uses_strategy('IdentitySetStrategy',s) - + for cls in [CustomEq,CustomCmp,CustomHash]: s = set() s.add(cls()) assert not self.uses_strategy('IdentitySetStrategy',s) - - + + def test_use_identity_strategy_list(self): - + class X(object): pass - + assert self.uses_strategy('IdentitySetStrategy',set([X(),X()])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) - + def test_identity_strategy_add(self): - + class X(object): pass - + class NotIdent(object): def __eq__(self,other): pass - + s = set([X(),X()]) s.add('foo') assert not self.uses_strategy('IdentitySetStrategy',s) s = set([X(),X()]) s.add(NotIdent()) assert not self.uses_strategy('IdentitySetStrategy',s) - + def test_identity_strategy_sanity(self): - + class X(object): pass - + class Y(object): pass - + a,b,c,d,e,f = X(),Y(),X(),Y(),X(),Y() - + s = set([a,b]).union(set([c])) - assert self.uses_strategy('IdentitySetStrategy',s) + assert self.uses_strategy('IdentitySetStrategy',s) assert set([a,b,c]) == s s = set([a,b,c,d,e,f]) - set([d,e,f]) assert self.uses_strategy('IdentitySetStrategy',s) assert set([a,b,c]) == s - - + + s = set([a]) s.update([b,c]) - + assert s == set([a,b,c]) - - + + def test_identity_strategy_iterators(self): - + class X(object): pass - + s = set([X() for i in range(10)]) counter = 0 for item in s: counter += 1 assert item in s - + assert counter == 10 - - + + def test_identity_strategy_other_cmp(self): - - #test tries to hit positive and negative in + + # test tries to hit positive and negative in # may_contain_equal_elements - + class X(object): pass - - s = set([X() for i in range(10)]) - + + s = set([X() for i in range(10)]) + assert s.intersection(set([1,2,3])) == set() assert s.intersection(set(['a','b','c'])) == set() assert s.intersection(set(['a','b','c'])) == set() assert s.intersection(set([X(),X()])) == set() - + other = set(['a','b','c',s.__iter__().next()]) intersect = s.intersection(other) assert len(intersect) == 1 assert intersect.__iter__().next() in s assert intersect.__iter__().next() in other - + def test_class_monkey_patch(self): - + class X(object): pass - + s = set() - + s.add(X()) assert self.uses_strategy('IdentitySetStrategy',s) X.__eq__ = lambda self,other : None @@ -148,47 +147,47 @@ assert not 
self.uses_strategy('IdentitySetStrategy',set([X(),""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) - - # An interesting case, add an instance, mutate the class, + + # An interesting case, add an instance, mutate the class, # then add the same instance. - + class X(object): pass - + s = set() inst = X() s.add(inst) X.__eq__ = lambda x,y : x is y s.add(inst) - + assert len(s) == 1 assert s.__iter__().next() is inst assert not self.uses_strategy('IdentitySetStrategy',s) - - - #Add instance, mutate class, check membership of that instance. - + + + # Add instance, mutate class, check membership of that instance. + class X(object): pass - - + + inst = X() s = set() s.add(inst) X.__eq__ = lambda x,y : x is y assert inst in s - + # Test Wrong strategy # If the strategy is changed by mutation, but the instance # does not change, then this tests the methods that call # may_contain_equal_elements still function. # i.e. same instance in two sets, one with object strategy, one with # identity strategy. - + class X(object): pass - - + + inst = X() s1 = set() s1.add(inst) @@ -197,13 +196,10 @@ s2 = set() s2.add(inst) assert not self.uses_strategy('IdentitySetStrategy',s2) - + assert s1.intersection(s2) == set([inst]) assert (s1 - s2) == set() assert (s2 - s1) == set() - + s1.difference_update(s2) assert s1 == set() - - - From noreply at buildbot.pypy.org Tue Jul 2 01:35:12 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 2 Jul 2013 01:35:12 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130701233512.933A01C0168@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65160:98718a061cac Date: 2013-07-01 16:34 -0700 http://bitbucket.org/pypy/pypy/changeset/98718a061cac/ Log: merge default diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -132,7 +132,8 @@ _tls.main = gmain _tls.current = gmain -def _greenlet_start(greenlet, (args, kwds)): +def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: res = greenlet.run(*args, **kwds) diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -65,3 +65,8 @@ .. branch: ndarray-view Add view to ndarray and zeroD arrays, not on dtype scalars yet +.. branch: identity-set +Faster sets for objects + +.. 
branch: inline-identityhash +Inline the fast path of id() and hash() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -184,6 +184,7 @@ appleveldefs = {} interpleveldefs = { 'choose': 'interp_arrayops.choose', + 'put': 'interp_arrayops.put', 'repeat': 'interp_arrayops.repeat', } submodules = { diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -13,6 +13,9 @@ def next(self): self.called_once = True + def next_skip_x(self, n): + self.called_once = True + def getitem(self): return self.v.get_scalar_value() diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -192,6 +192,61 @@ loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) return out + + at unwrap_spec(mode=str) +def put(space, w_arr, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy import constants + from pypy.module.micronumpy.support import int_w + + arr = convert_to_array(space, w_arr) + + if mode not in constants.MODES: + raise OperationError(space.w_ValueError, + space.wrap("mode %s not known" % (mode,))) + if not w_indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not w_values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + + dtype = arr.get_dtype() + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = int_w(space, idx) + + if index < 0 or index >= arr.get_size(): + if constants.MODES[mode] == constants.MODE_RAISE: + raise OperationError(space.w_ValueError, space.wrap( + "invalid entry in choice array")) + elif constants.MODES[mode] == constants.MODE_WRAP: + index = index % arr.get_size() + else: + assert constants.MODES[mode] == constants.MODE_CLIP + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) + + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -550,9 +550,10 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - def descr_put(self, space, w_indices, w_values, w_mode='raise'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "put not implemented yet")) + @unwrap_spec(mode=str) + def descr_put(self, space, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy.interp_arrayops import put + put(space, self, w_indices, w_values, mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -630,7 +631,7 @@ old_itemsize = self.get_dtype().get_size() new_itemsize = dtype.get_size() impl = self.implementation - new_shape = self.get_shape() + 
new_shape = self.get_shape()[:] dims = len(new_shape) if dims == 0: # Cannot resize scalars @@ -989,6 +990,7 @@ prod = interp2app(W_NDimArray.descr_prod), max = interp2app(W_NDimArray.descr_max), min = interp2app(W_NDimArray.descr_min), + put = interp2app(W_NDimArray.descr_put), argmax = interp2app(W_NDimArray.descr_argmax), argmin = interp2app(W_NDimArray.descr_argmin), all = interp2app(W_NDimArray.descr_all), diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -37,7 +37,7 @@ we can go faster. All the calculations happen in next() -next_skip_x() tries to do the iteration for a number of steps at once, +next_skip_x(steps) tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -132,3 +132,26 @@ x = array([0, 0, 0], dtype='i2') r = array([2, 1, 0]).choose([a, b, c], out=x) assert r.dtype == 'i2' + + def test_put_basic(self): + from numpypy import arange, array + a = arange(5) + a.put([0, 2], [-44, -55]) + assert (a == array([-44, 1, -55, 3, 4])).all() + a = arange(5) + a.put([3, 4], 9) + assert (a == array([0, 1, 2, 9, 9])).all() + a = arange(5) + a.put(1, [7, 8]) + assert (a == array([0, 7, 2, 3, 4])).all() + + def test_put_modes(self): + from numpypy import array, arange + a = arange(5) + a.put(22, -5, mode='clip') + assert (a == array([0, 1, 2, 3, -5])).all() + a = arange(5) + a.put(22, -5, mode='wrap') + assert (a == array([0, 1, -5, 3, 4])).all() + raises(ValueError, "arange(5).put(22, -5, mode='raise')") + raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,4 +1,5 @@ from pypy.module.micronumpy.iter import MultiDimViewIterator +from pypy.module.micronumpy.arrayimpl.scalar import ScalarIterator class MockArray(object): size = 1 @@ -8,7 +9,7 @@ #Let's get started, simple iteration in C order with #contiguous layout => strides[-1] is 1 start = 0 - shape = [3, 5] + shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] @@ -47,7 +48,7 @@ #iteration in C order with #contiguous layout => strides[-1] is 1 #skip less than the shape start = 0 - shape = [3, 5] + shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] @@ -89,3 +90,9 @@ assert i.indexes == [0,1] assert i.offset == 3 assert i.done() + + def test_scalar_iter(self): + i = ScalarIterator(MockArray) + i.next() + i.next_skip_x(3) + assert i.done() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1411,16 +1411,17 @@ assert a[3].imag == -10 assert a[2].imag == -5 - def test_ndarray_view(self): + def test_view(self): from numpypy import array, int8, int16, dtype - x = array([(1, 2)], dtype=[('a', int8), ('b', int8)]) + x = array((1, 2), dtype=int8) + assert x.shape == (2,) y = x.view(dtype=int16) - print 
y,y.shape + assert x.shape == (2,) assert y[0] == 513 assert y.dtype == dtype('int16') y[0] = 670 - assert x['a'] == -98 - assert x['b'] == 2 + assert x[0] == -98 + assert x[1] == 2 f = array([1000, -1234], dtype='i4') nnp = self.non_native_prefix d = f.view(dtype=nnp + 'i4') diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -201,10 +201,27 @@ def main(n): i = 0 while i < n: - s = set([1,2,3]) + s = set([1, 2, 3]) i += 1 log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + + def test_specialised_tuple(self): + def main(n): + import pypyjit + + f = lambda: None + tup = (n, n) + while n > 0: + tup[0] # ID: getitem + pypyjit.residual_call(f) + n -= 1 + + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem') + assert log.opnames(ops) == [] diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -32,14 +32,6 @@ return d -def test_syslog(): - try: - import lib_pypy.syslog - except ImportError: - py.test.skip('no syslog on this platform') - d = run('syslog.ctc.py', '_syslog_cache.py') - assert 'LOG_NOTICE' in d - def test_resource(): try: import lib_pypy.resource diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -7,7 +7,7 @@ def test_basic(): g = grp.getgrnam("root") assert g.gr_gid == 0 - assert g.gr_mem == ['root'] + assert g.gr_mem == ['root'] or g.gr_mem == [] assert g.gr_name == 'root' assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) diff --git a/pypy/module/test_lib_pypy/test_syslog.py b/pypy/module/test_lib_pypy/test_syslog.py --- a/pypy/module/test_lib_pypy/test_syslog.py +++ b/pypy/module/test_lib_pypy/test_syslog.py @@ -1,8 +1,12 @@ -import py +import sys, py try: from lib_pypy import syslog except ImportError: py.test.skip('no syslog on this platform') +except AssertionError: + if '__pypy__' in sys.builtin_module_names: + raise + py.test.skip('AssertionError during import (wrong cffi version?)') # XXX very minimal test diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -26,7 +26,7 @@ def __repr__(self): """representation for debugging purposes""" reprlist = [repr(w_item) for w_item in self.getkeys()] - return "<%s(%s)>" % (self.__class__.__name__, ', '.join(reprlist)) + return "<%s(%s)(%s)>" % (self.__class__.__name__, self.strategy, ', '.join(reprlist)) def from_storage_and_strategy(self, storage, strategy): obj = self._newobj(self.space, None) @@ -759,6 +759,8 @@ strategy = self.space.fromcache(StringSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) + elif self.space.type(w_key).compares_by_identity(): + strategy = self.space.fromcache(IdentitySetStrategy) else: strategy = self.space.fromcache(ObjectSetStrategy) w_set.strategy = strategy @@ -918,6 +920,13 @@ def 
equals(self, w_set, w_other): if w_set.length() != w_other.length(): return False + if w_set.length() == 0: + return True + # it's possible to have 0-lenght strategy that's not empty + if w_set.strategy is w_other.strategy: + return self._issubset_unwrapped(w_set, w_other) + if not self.may_contain_equal_elements(w_other.strategy): + return False items = self.unerase(w_set.sstorage).keys() for key in items: if not w_other.has_key(self.wrap(key)): @@ -1187,7 +1196,9 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1221,7 +1232,9 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1255,9 +1268,11 @@ def may_contain_equal_elements(self, strategy): if strategy is self.space.fromcache(UnicodeSetStrategy): return False - if strategy is self.space.fromcache(UnicodeSetStrategy): + elif strategy is self.space.fromcache(UnicodeSetStrategy): return False - if strategy is self.space.fromcache(EmptySetStrategy): + elif strategy is self.space.fromcache(EmptySetStrategy): + return False + elif strategy is self.space.fromcache(IdentitySetStrategy): return False return True @@ -1315,6 +1330,41 @@ break d_obj[w_item] = None +class IdentitySetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("identityset") + erase = staticmethod(erase) + unerase = staticmethod(unerase) + + def get_empty_storage(self): + return self.erase({}) + + def get_empty_dict(self): + return {} + + def is_correct_type(self, w_key): + w_type = self.space.type(w_key) + return w_type.compares_by_identity() + + def may_contain_equal_elements(self, strategy): + #empty first, probably more likely + if strategy is self.space.fromcache(EmptySetStrategy): + return False + if strategy is self.space.fromcache(IntegerSetStrategy): + return False + if strategy is self.space.fromcache(StringSetStrategy): + return False + if strategy is self.space.fromcache(UnicodeSetStrategy): + return False + return True + + def unwrap(self, w_item): + return w_item + + def wrap(self, item): + return item + + def iter(self, w_set): + return IdentityIteratorImplementation(self.space, self, w_set) class IteratorImplementation(object): def __init__(self, space, strategy, implementation): @@ -1406,6 +1456,17 @@ else: return None +class IdentityIteratorImplementation(IteratorImplementation): + def __init__(self, space, strategy, w_set): + IteratorImplementation.__init__(self, space, strategy, w_set) + d = strategy.unerase(w_set.sstorage) + self.iterator = d.iterkeys() + + def next_entry(self): + for key in self.iterator: + return self.space.wrap(key) + else: + return None class RDictIteratorImplementation(IteratorImplementation): def __init__(self, space, strategy, w_set): @@ -1525,6 +1586,15 @@ w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return + # check for compares by identity + for w_item in iterable_w: + if not space.type(w_item).compares_by_identity(): + break + else: + w_set.strategy = space.fromcache(IdentitySetStrategy) 
+ w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + return + w_set.strategy = space.fromcache(ObjectSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -18,6 +18,8 @@ iter_n = unrolling_iterable(range(typelen)) class cls(W_AbstractTupleObject): + _immutable_fields_ = ['value%s' % i for i in iter_n] + def __init__(self, space, *values_w): self.space = space assert len(values_w) == typelen diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py new file mode 100644 --- /dev/null +++ b/pypy/objspace/std/test/test_identityset.py @@ -0,0 +1,205 @@ +import py + + +class AppTestIdentitySet(object): + + # needed for compares_by_identity + spaceconfig = {"objspace.std.withidentitydict": True} + + def setup_class(cls): + from pypy.objspace.std import identitydict + if cls.runappdirect: + py.test.skip("interp2app doesn't work on appdirect") + + def w_uses_strategy(self, s , obj): + import __pypy__ + return s in __pypy__.internal_repr(obj) + + def test_use_identity_strategy(self): + + class Plain(object): + pass + + class CustomEq(object): + def __eq__(self, other): + return True + + class CustomCmp (object): + def __cmp__(self, other): + return 0 + + class CustomHash(object): + def __hash__(self): + return 0 + + s = set() + + assert not self.uses_strategy('IdentitySetStrategy',s) + + s.add(Plain()) + + assert self.uses_strategy('IdentitySetStrategy',s) + + for cls in [CustomEq,CustomCmp,CustomHash]: + s = set() + s.add(cls()) + assert not self.uses_strategy('IdentitySetStrategy',s) + + + def test_use_identity_strategy_list(self): + + class X(object): + pass + + assert self.uses_strategy('IdentitySetStrategy',set([X(),X()])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) + + def test_identity_strategy_add(self): + + class X(object): + pass + + class NotIdent(object): + def __eq__(self,other): + pass + + s = set([X(),X()]) + s.add('foo') + assert not self.uses_strategy('IdentitySetStrategy',s) + s = set([X(),X()]) + s.add(NotIdent()) + assert not self.uses_strategy('IdentitySetStrategy',s) + + def test_identity_strategy_sanity(self): + + class X(object): + pass + + class Y(object): + pass + + a,b,c,d,e,f = X(),Y(),X(),Y(),X(),Y() + + s = set([a,b]).union(set([c])) + assert self.uses_strategy('IdentitySetStrategy',s) + assert set([a,b,c]) == s + s = set([a,b,c,d,e,f]) - set([d,e,f]) + assert self.uses_strategy('IdentitySetStrategy',s) + assert set([a,b,c]) == s + + + s = set([a]) + s.update([b,c]) + + assert s == set([a,b,c]) + + + def test_identity_strategy_iterators(self): + + class X(object): + pass + + s = set([X() for i in range(10)]) + counter = 0 + for item in s: + counter += 1 + assert item in s + + assert counter == 10 + + + def test_identity_strategy_other_cmp(self): + + # test tries to hit positive and negative in + # may_contain_equal_elements + + class X(object): + pass + + s = set([X() for i in range(10)]) + + assert s.intersection(set([1,2,3])) == set() + assert s.intersection(set(['a','b','c'])) == set() + assert s.intersection(set(['a','b','c'])) == set() + assert s.intersection(set([X(),X()])) == set() + + other = 
set(['a','b','c',s.__iter__().next()]) + intersect = s.intersection(other) + assert len(intersect) == 1 + assert intersect.__iter__().next() in s + assert intersect.__iter__().next() in other + + def test_class_monkey_patch(self): + + class X(object): + pass + + s = set() + + s.add(X()) + assert self.uses_strategy('IdentitySetStrategy',s) + X.__eq__ = lambda self,other : None + s.add(X()) + assert not self.uses_strategy('IdentitySetStrategy',s) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),X()])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) + + # An interesting case, add an instance, mutate the class, + # then add the same instance. + + class X(object): + pass + + s = set() + inst = X() + s.add(inst) + X.__eq__ = lambda x,y : x is y + s.add(inst) + + assert len(s) == 1 + assert s.__iter__().next() is inst + assert not self.uses_strategy('IdentitySetStrategy',s) + + + # Add instance, mutate class, check membership of that instance. + + class X(object): + pass + + + inst = X() + s = set() + s.add(inst) + X.__eq__ = lambda x,y : x is y + assert inst in s + + # Test Wrong strategy + # If the strategy is changed by mutation, but the instance + # does not change, then this tests the methods that call + # may_contain_equal_elements still function. + # i.e. same instance in two sets, one with object strategy, one with + # identity strategy. + + class X(object): + pass + + + inst = X() + s1 = set() + s1.add(inst) + assert self.uses_strategy('IdentitySetStrategy',s1) + X.__eq__ = lambda x,y : x is y + s2 = set() + s2.add(inst) + assert not self.uses_strategy('IdentitySetStrategy',s2) + + assert s1.intersection(s2) == set([inst]) + assert (s1 - s2) == set() + assert (s2 - s1) == set() + + s1.difference_update(s2) + assert s1 == set() diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -57,8 +57,8 @@ from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint from rpython.rlib.rarithmetic import LONG_BIT_SHIFT from rpython.rlib.debug import ll_assert, debug_print, debug_start, debug_stop -from rpython.rlib.objectmodel import we_are_translated -from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib.objectmodel import specialize + # # Handles the objects in 2 generations: @@ -1824,6 +1824,48 @@ # ---------- # id() and identityhash() support + def _allocate_shadow(self, obj): + size_gc_header = self.gcheaderbuilder.size_gc_header + size = self.get_size(obj) + shadowhdr = self._malloc_out_of_nursery(size_gc_header + + size) + # Initialize the shadow enough to be considered a + # valid gc object. If the original object stays + # alive at the next minor collection, it will anyway + # be copied over the shadow and overwrite the + # following fields. But if the object dies, then + # the shadow will stay around and only be freed at + # the next major collection, at which point we want + # it to look valid (but ready to be freed). 
+ shadow = shadowhdr + size_gc_header + self.header(shadow).tid = self.header(obj).tid + typeid = self.get_type_id(obj) + if self.is_varsize(typeid): + lenofs = self.varsize_offset_to_length(typeid) + (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] + # + self.header(obj).tid |= GCFLAG_HAS_SHADOW + self.nursery_objects_shadows.setitem(obj, shadow) + return shadow + + def _find_shadow(self, obj): + # + # The object is not a tagged pointer, and it is still in the + # nursery. Find or allocate a "shadow" object, which is + # where the object will be moved by the next minor + # collection + if self.header(obj).tid & GCFLAG_HAS_SHADOW: + shadow = self.nursery_objects_shadows.get(obj) + ll_assert(shadow != NULL, + "GCFLAG_HAS_SHADOW but no shadow found") + else: + shadow = self._allocate_shadow(obj) + # + # The answer is the address of the shadow. + return shadow + _find_shadow._dont_inline_ = True + + @specialize.arg(2) def id_or_identityhash(self, gcobj, is_hash): """Implement the common logic of id() and identityhash() of an object, given as a GCREF. @@ -1832,41 +1874,7 @@ # if self.is_valid_gc_object(obj): if self.is_in_nursery(obj): - # - # The object is not a tagged pointer, and it is still in the - # nursery. Find or allocate a "shadow" object, which is - # where the object will be moved by the next minor - # collection - if self.header(obj).tid & GCFLAG_HAS_SHADOW: - shadow = self.nursery_objects_shadows.get(obj) - ll_assert(shadow != NULL, - "GCFLAG_HAS_SHADOW but no shadow found") - else: - size_gc_header = self.gcheaderbuilder.size_gc_header - size = self.get_size(obj) - shadowhdr = self._malloc_out_of_nursery(size_gc_header + - size) - # Initialize the shadow enough to be considered a - # valid gc object. If the original object stays - # alive at the next minor collection, it will anyway - # be copied over the shadow and overwrite the - # following fields. But if the object dies, then - # the shadow will stay around and only be freed at - # the next major collection, at which point we want - # it to look valid (but ready to be freed). - shadow = shadowhdr + size_gc_header - self.header(shadow).tid = self.header(obj).tid - typeid = self.get_type_id(obj) - if self.is_varsize(typeid): - lenofs = self.varsize_offset_to_length(typeid) - (shadow + lenofs).signed[0] = (obj + lenofs).signed[0] - # - self.header(obj).tid |= GCFLAG_HAS_SHADOW - self.nursery_objects_shadows.setitem(obj, shadow) - # - # The answer is the address of the shadow. 
- obj = shadow - # + obj = self._find_shadow(obj) elif is_hash: if self.header(obj).tid & GCFLAG_HAS_SHADOW: # @@ -1884,6 +1892,7 @@ if is_hash: i = mangle_hash(i) return i + id_or_identityhash._always_inline_ = True def id(self, gcobj): return self.id_or_identityhash(gcobj, False) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -424,7 +424,7 @@ self.identityhash_ptr = getfn(GCClass.identityhash.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - minimal_transform=False) + minimal_transform=False, inline=True) if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], @@ -433,7 +433,7 @@ if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, [s_gc, s_gcref], annmodel.SomeInteger(), - inline = False, + inline = True, minimal_transform = False) else: self.id_ptr = None diff --git a/rpython/translator/platform/arm.py b/rpython/translator/platform/arm.py --- a/rpython/translator/platform/arm.py +++ b/rpython/translator/platform/arm.py @@ -17,6 +17,7 @@ class ARM(Linux): name = "arm" + shared_only = ('-fPIC',) available_librarydirs = [SB2 + '/lib/arm-linux-gnueabi/', SB2 + '/lib/arm-linux-gnueabihf/', From noreply at buildbot.pypy.org Tue Jul 2 06:54:55 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 2 Jul 2013 06:54:55 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-segfault: document branch Message-ID: <20130702045455.0D43D1C0168@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-segfault Changeset: r65161:5e52e47a4a8d Date: 2013-07-02 07:51 +0300 http://bitbucket.org/pypy/pypy/changeset/5e52e47a4a8d/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -65,3 +65,6 @@ .. branch: ndarray-view Add view to ndarray and zeroD arrays, not on dtype scalars yet +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + From noreply at buildbot.pypy.org Tue Jul 2 06:54:56 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 2 Jul 2013 06:54:56 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-segfault: close to-be-merged branch Message-ID: <20130702045456.48AE61C301A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-segfault Changeset: r65162:a038818305e0 Date: 2013-07-02 07:51 +0300 http://bitbucket.org/pypy/pypy/changeset/a038818305e0/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Tue Jul 2 06:54:57 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 2 Jul 2013 06:54:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge numpypy-segfault fixing segfault while iterating over empty ndarrays Message-ID: <20130702045457.B91611C0168@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65163:49b114578eb7 Date: 2013-07-02 07:54 +0300 http://bitbucket.org/pypy/pypy/changeset/49b114578eb7/ Log: merge numpypy-segfault fixing segfault while iterating over empty ndarrays diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -65,6 +65,9 @@ .. branch: ndarray-view Add view to ndarray and zeroD arrays, not on dtype scalars yet +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + .. 
branch: identity-set Faster sets for objects diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -65,7 +65,7 @@ [ 3., 4., -1.], [-1., -1., -1.]]) - + NOTE: support for not passing x and y is unsupported """ if space.is_none(w_y): @@ -122,10 +122,10 @@ for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("record type mismatch")) elif dtype.is_record_type() or a_dt.is_record_type(): - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy.support import product from rpython.rlib import jit # structures to describe slicing @@ -225,7 +226,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = False + self._done = self.shapelen == 0 or product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size @@ -284,7 +285,7 @@ self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] self.first_line = True self.indices = [0] * len(shape) - self._done = False + self._done = array.get_size() == 0 self.offset = array.start self.dim = dim self.array = array diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -293,6 +293,14 @@ b = array(a, copy=False, ndmin=4) b[0,0,0,0] = 0 assert a[0, 0] == 0 + a = array([[[]]]) + # Simulate tiling an empty array, really tests repeat, reshape + # b = tile(a, (3, 2, 5)) + reps = (3, 4, 5) + c = array(a, copy=False, subok=True, ndmin=len(reps)) + d = c.reshape(3, 4, 0) + e = d.repeat(3, 0) + assert e.shape == (9, 4, 0) def test_type(self): from numpypy import array @@ -2562,6 +2570,9 @@ a = array(range(100) + range(100) + range(100)) b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + a = array([[[]]]).reshape(3,4,0) + b = a.argsort() + assert b.size == 0 def test_argsort_random(self): from numpypy import array From noreply at buildbot.pypy.org Tue Jul 2 07:53:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 2 Jul 2013 07:53:53 +0200 (CEST) Subject: [pypy-commit] stmgc default: add build and release versions of demo_random to tests Message-ID: <20130702055353.350521C0343@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r337:d197afc97b0e Date: 2013-07-02 07:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/d197afc97b0e/ Log: add build and release versions of demo_random to tests diff --git a/c4/test/test_zdemo_random.py b/c4/test/test_zdemo_random.py --- a/c4/test/test_zdemo_random.py +++ b/c4/test/test_zdemo_random.py @@ -2,7 +2,7 @@ import os import subprocess -def test_and_run(): +def test_and_run_debug(): path = os.path.dirname(__file__) path = os.path.dirname(path) res = subprocess.call(["make", "debug-demo_random"], cwd=path) @@ -10,4 +10,19 @@ res = 
subprocess.call(["./debug-demo_random"], cwd=path) assert not res - +def test_and_run_build(): + path = os.path.dirname(__file__) + path = os.path.dirname(path) + res = subprocess.call(["make", "build-demo_random"], cwd=path) + assert not res + res = subprocess.call(["./build-demo_random"], cwd=path) + assert not res + +def test_and_run_release(): + path = os.path.dirname(__file__) + path = os.path.dirname(path) + res = subprocess.call(["make", "release-demo_random"], cwd=path) + assert not res + res = subprocess.call(["./release-demo_random"], cwd=path) + assert not res + From noreply at buildbot.pypy.org Tue Jul 2 09:30:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 2 Jul 2013 09:30:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Yay, targetdemo2 passes Message-ID: <20130702073006.380571C0295@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65164:041f5c2e4e68 Date: 2013-07-02 09:29 +0200 http://bitbucket.org/pypy/pypy/changeset/041f5c2e4e68/ Log: Yay, targetdemo2 passes diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -46,10 +46,22 @@ def gc_header_for(self, obj, needs_hash=False): return self.gcdata.gc.gcheaderbuilder.header_of_object(obj) + def _gct_with_roots_pushed(self, hop): + livevars = self.push_roots(hop) + self.default(hop) + self.pop_roots(hop, livevars) + + gct_stm_become_inevitable = _gct_with_roots_pushed + gct_stm_perform_transaction = _gct_with_roots_pushed + class StmRootWalker(BaseRootWalker): def need_thread_support(self, gctransformer, getfn): + # gc_thread_start() and gc_thread_die() don't need to become + # anything. When a new thread start, there is anyway first + # the "after/before" callbacks from rffi, which contain calls + # to "stm_enter_callback_call/stm_leave_callback_call". 
pass def walk_stack_roots(self, collect_stack_root): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -424,10 +424,10 @@ 'stm_initialize': LLOp(), 'stm_finalize': LLOp(), 'stm_barrier': LLOp(sideeffects=False), - 'stm_allocate': LLOp(sideeffects=False), - 'stm_become_inevitable': LLOp(), - 'stm_minor_collect': LLOp(), - 'stm_major_collect': LLOp(), + 'stm_allocate': LLOp(sideeffects=False, canmallocgc=True), + 'stm_become_inevitable': LLOp(canmallocgc=True), + 'stm_minor_collect': LLOp(canmallocgc=True), + 'stm_major_collect': LLOp(canmallocgc=True), 'stm_get_tid': LLOp(canfold=True), 'stm_ptr_eq': LLOp(canfold=True), 'stm_id': LLOp(sideeffects=False), @@ -440,7 +440,7 @@ 'stm_set_transaction_length': LLOp(), 'stm_change_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), - 'stm_perform_transaction':LLOp(), + 'stm_perform_transaction':LLOp(canmallocgc=True), 'stm_enter_callback_call':LLOp(), 'stm_leave_callback_call':LLOp(), diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -63,7 +63,6 @@ # store (green args, red args) into p # return 1 # causes perform_tr() to loop and call us again # p.result_value = result_value - # p.got_exception = NULL # return 0 # stop perform_tr() and returns def __init__(self, stmtransformer, graph): @@ -212,21 +211,16 @@ blockst.closeblock(Link(a_vars, link.target)) # # hack at the regular return block, to set the result into - # 'p.result_value', clear 'p.got_exception', and return 0 + # 'p.result_value' and return 0. Note that 'p.got_exception' + # is already cleared. blockr = callback_graph.returnblock c_result_value = Constant('result_value', lltype.Void) - c_got_exception = Constant('got_exception', lltype.Void) - c_null = Constant(lltype.nullptr(self.CONTAINER.got_exception.TO), - self.CONTAINER.got_exception) v_p = self.container_var() renamed_p[blockr] = v_p blockr.operations = [ SpaceOperation('setfield', [v_p, c_result_value, blockr.inputargs[0]], varoftype(lltype.Void)), - SpaceOperation('setfield', - [v_p, c_got_exception, c_null], - varoftype(lltype.Void)), ] v = varoftype(lltype.Signed) annotator.setbinding(v, s_Int) diff --git a/rpython/translator/stm/stmgcintf.py b/rpython/translator/stm/stmgcintf.py --- a/rpython/translator/stm/stmgcintf.py +++ b/rpython/translator/stm/stmgcintf.py @@ -7,7 +7,7 @@ cdir = os.path.abspath(os.path.join(cdir2, '..', 'stm')) separate_source = ''' -#define _GC_DEBUG 2 /* XXX move elsewhere */ +//#define _GC_DEBUG 2 /* XXX move elsewhere */ #include "src_stm/stmgc.h" diff --git a/rpython/translator/stm/test/support.py b/rpython/translator/stm/test/support.py --- a/rpython/translator/stm/test/support.py +++ b/rpython/translator/stm/test/support.py @@ -14,10 +14,11 @@ # Prevent the RaiseAnalyzer from just emitting "WARNING: Unknown # operation". We want instead it to crash. 
from rpython.translator.backendopt.canraise import RaiseAnalyzer - RaiseAnalyzer.fail_on_unknown_operation = True + prev_setting = RaiseAnalyzer.fail_on_unknown_operation try: + RaiseAnalyzer.fail_on_unknown_operation = True res = StandaloneTests.compile(self, entry_point, debug=True, **kwds) finally: - RaiseAnalyzer.fail_on_unknown_operation = False + RaiseAnalyzer.fail_on_unknown_operation = prev_setting return res diff --git a/rpython/translator/stm/test/targetdemo2.py b/rpython/translator/stm/test/targetdemo2.py --- a/rpython/translator/stm/test/targetdemo2.py +++ b/rpython/translator/stm/test/targetdemo2.py @@ -250,7 +250,6 @@ def setup_threads(): #space.threadlocals.setup_threads(space) bootstrapper.setup() - rstm.invoke_around_extcall() def start_thread(args): bootstrapper.acquire(args) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -115,8 +115,7 @@ def test_targetdemo(self): t, cbuilder = self.compile(targetdemo2.entry_point) - data, dataerr = cbuilder.cmdexec('4 5000', err=True, - env={'PYPY_GC_DEBUG': '1'}) + data, dataerr = cbuilder.cmdexec('4 5000', err=True) assert 'check ok!' in data def test_bug1(self): From noreply at buildbot.pypy.org Tue Jul 2 12:24:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 2 Jul 2013 12:24:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: this assert can fail Message-ID: <20130702102402.C54E51C13AC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r338:2040cea49c77 Date: 2013-07-02 12:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/2040cea49c77/ Log: this assert can fail diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -44,7 +44,11 @@ void stmgc_done_nursery(void) { struct tx_descriptor *d = thread_descriptor; - assert(!minor_collect_anything_to_do(d)); + /* someone may have called minor_collect_soon() + inbetween the preceeding minor_collect() and + this assert (committransaction() -> + updatechainheads() -> stub_malloc() -> ...): */ + /* assert(!minor_collect_anything_to_do(d)); */ stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); From noreply at buildbot.pypy.org Tue Jul 2 15:13:22 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 2 Jul 2013 15:13:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Use re.search() to find MSVC compiler version. Message-ID: <20130702131322.0FC411C0295@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r65165:140cb204a130 Date: 2013-07-02 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/140cb204a130/ Log: Use re.search() to find MSVC compiler version. This will handle the Chinese translation of the compiler. 
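The point of the one-line change below: re.match() only matches at the very
start of the string, while re.search() scans the whole banner. A minimal
sketch with approximated banners (the real cl.exe output differs slightly,
and the localized one is partly non-ASCII):

    import re

    pattern = r'Microsoft.+C/C\+\+.+\s([0-9]+)\.([0-9]+).*'

    english   = "Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01"
    # Hypothetical localized banner: "Microsoft" is no longer the first word
    # on the line, so the anchored match fails.
    localized = "xxxx Microsoft (R) C/C++ xxxx 16.00.30319.01"

    assert re.match(pattern, english) is not None
    assert re.match(pattern, localized) is None
    assert re.search(pattern, localized).groups() == ('16', '00')

With re.search() the version digits are still extracted, so the existing
int(''.join(r.groups())) / 10 - 60 computation keeps working on non-English
installations.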
diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -119,7 +119,7 @@ # detect version of current compiler returncode, stdout, stderr = _run_subprocess(self.cc, '', env=self.c_environ) - r = re.match(r'Microsoft.+C/C\+\+.+\s([0-9]+)\.([0-9]+).*', stderr) + r = re.search(r'Microsoft.+C/C\+\+.+\s([0-9]+)\.([0-9]+).*', stderr) if r is not None: self.version = int(''.join(r.groups())) / 10 - 60 else: From noreply at buildbot.pypy.org Tue Jul 2 16:52:46 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 2 Jul 2013 16:52:46 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: add a whatsnew document for the 2.1 beta and release branch Message-ID: <20130702145246.BB38C1C2FBE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65166:00482cc53f65 Date: 2013-07-02 16:52 +0200 http://bitbucket.org/pypy/pypy/changeset/00482cc53f65/ Log: add a whatsnew document for the 2.1 beta and release branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.1.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.1.rst From noreply at buildbot.pypy.org Wed Jul 3 09:54:21 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 3 Jul 2013 09:54:21 +0200 (CEST) Subject: [pypy-commit] stmgc default: add stm_thread_local_obj to demo_random.c Message-ID: <20130703075421.B52D81C13B4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r339:e7d1df97b6f0 Date: 2013-07-03 09:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/e7d1df97b6f0/ Log: add stm_thread_local_obj to demo_random.c diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -293,6 +293,20 @@ td.roots[i] = (gcptr)allocate_node(); } + if (td.thread_seed % 3 == 0) { + stm_thread_local_obj = (gcptr)allocate_node(); + } + else if (td.thread_seed % 3 == 1) { + stm_thread_local_obj = allocate_pseudoprebuilt_with_hash + ( + sizeof(struct node), GCTID_STRUCT_NODE, PREBUILT); + ((nodeptr)stm_thread_local_obj)->hash = PREBUILT; + } + else { + stm_thread_local_obj = allocate_pseudoprebuilt + (sizeof(struct node), GCTID_STRUCT_NODE); + } + } gcptr rare_events(gcptr p, gcptr _r, gcptr _sr) @@ -432,7 +446,7 @@ w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } - if (w_t->hash >= 0 && (w_t->hash < PREBUILT || + if (w_t->hash >= 0 && (w_t->hash <= PREBUILT || w_t->hash < SHARED_ROOTS)) { // should be with predefined hash assert (stm_id((gcptr)w_t) != stm_hash((gcptr)w_t)); @@ -449,8 +463,11 @@ gcptr _r, _sr; int num, k; - num = get_rand(td.num_roots); - _r = td.roots[num]; + num = get_rand(td.num_roots+1); + if (num == 0) + _r = stm_thread_local_obj; + else + _r = td.roots[num - 1]; num = get_rand(SHARED_ROOTS); _sr = shared_roots[num]; From noreply at buildbot.pypy.org Wed Jul 3 10:12:03 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 10:12:03 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: update version numbers Message-ID: <20130703081203.89FC91C01C0@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65167:20c90cdc32a6 Date: 2013-07-02 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/20c90cdc32a6/ Log: update version numbers diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 
+29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.1.0-beta1" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 1, 0, "beta", 1) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Wed Jul 3 10:12:05 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 10:12:05 +0200 (CEST) Subject: [pypy-commit] pypy default: add a whatsnew document for the 2.1 beta and release branch Message-ID: <20130703081205.788D91C01C0@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65168:bcc8da750d52 Date: 2013-07-02 16:52 +0200 http://bitbucket.org/pypy/pypy/changeset/bcc8da750d52/ Log: add a whatsnew document for the 2.1 beta and release branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.1.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.1.rst From noreply at buildbot.pypy.org Wed Jul 3 10:16:24 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 10:16:24 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20130703081624.F25161C01C0@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65169:15b0489c15d8 Date: 2013-07-03 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/15b0489c15d8/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. 
branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) From noreply at buildbot.pypy.org Wed Jul 3 15:21:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 3 Jul 2013 15:21:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add more checks Message-ID: <20130703132125.12C391C125E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r340:57601aa1cb4b Date: 2013-07-03 15:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/57601aa1cb4b/ Log: Add more checks diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -253,6 +253,7 @@ for (i = 0; i < size; i += 2) { gcptr B = items[i]; assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ + assert(B->h_tid & GCFLAG_PUBLIC); /* to be on the safe side --- but actually needed, see the gcptrlist_insert2(L, NULL) above */ @@ -264,6 +265,7 @@ assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(L->h_revision)); + assert(B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); g2l_insert(&d->public_to_private, B, L); /* this is definitely needed: all keys in public_to_private From noreply at buildbot.pypy.org Wed Jul 3 15:21:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 3 Jul 2013 15:21:26 +0200 (CEST) Subject: [pypy-commit] stmgc default: Bah?? No clue. Move -lrt at the end of the gcc invocation, for pleasing Message-ID: <20130703132126.3EBE71C138A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r341:92ca4b3673d1 Date: 2013-07-03 15:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/92ca4b3673d1/ Log: Bah?? No clue. Move -lrt at the end of the gcc invocation, for pleasing gcc on tannit. diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -26,13 +26,13 @@ # note that we don't say -DNDEBUG, so that asserts should still be compiled it build-%: %.c ${H_FILES} ${C_FILES} stmgc.c - gcc -lrt -pthread -O2 -g $< -o build-$* -Wall stmgc.c + gcc -pthread -O2 -g $< -o build-$* -Wall stmgc.c -lrt debug-%: %.c ${H_FILES} ${C_FILES} - gcc -lrt -pthread ${DEBUG} $< -o debug-$* -Wall ${C_FILES} + gcc -pthread ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt release-%: %.c ${H_FILES} ${C_FILES} stmgc.c - gcc -lrt -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c + gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt test-%: ./$* 2>/dev/null | grep "check ok" From noreply at buildbot.pypy.org Wed Jul 3 15:21:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 3 Jul 2013 15:21:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge heads Message-ID: <20130703132127.507411C13B4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r342:65b6e31e83b5 Date: 2013-07-03 15:21 +0200 http://bitbucket.org/pypy/stmgc/changeset/65b6e31e83b5/ Log: merge heads diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -26,13 +26,13 @@ # note that we don't say -DNDEBUG, so that asserts should still be compiled it build-%: %.c ${H_FILES} ${C_FILES} stmgc.c - gcc -lrt -pthread -O2 -g $< -o build-$* -Wall stmgc.c + gcc -pthread -O2 -g $< -o build-$* -Wall stmgc.c -lrt debug-%: %.c ${H_FILES} ${C_FILES} - gcc -lrt -pthread ${DEBUG} $< -o debug-$* -Wall ${C_FILES} + gcc -pthread ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt release-%: %.c ${H_FILES} ${C_FILES} stmgc.c - gcc -lrt -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c + gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt test-%: ./$* 2>/dev/null | grep "check ok" From noreply at buildbot.pypy.org Wed Jul 3 16:13:47 
2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 3 Jul 2013 16:13:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix a hard bug. Message-ID: <20130703141347.D91B11C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r343:69be3c7a1f51 Date: 2013-07-03 16:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/69be3c7a1f51/ Log: Fix a hard bug. diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -318,6 +318,7 @@ *root = fresh_old_copy; /* add 'fresh_old_copy' to the list of objects to trace */ + assert(!(fresh_old_copy->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, fresh_old_copy); } } @@ -477,7 +478,18 @@ assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - obj->h_tid |= GCFLAG_WRITE_BARRIER; + + /* We add the WRITE_BARRIER flag to objects here, but warning: + we may occasionally see a PUBLIC object --- one that was + a private/protected object when it was added to + old_objects_to_trace, and has been stolen. So we have to + check and not do any change the obj->h_tid in that case. + Otherwise this conflicts with the rule that we may only + modify obj->h_tid of a public object in order to add + PUBLIC_TO_PRIVATE. + */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) + obj->h_tid |= GCFLAG_WRITE_BARRIER; stmgc_trace(obj, &visit_if_young); } @@ -675,6 +687,7 @@ gcptr P = stmgcpage_malloc(allocate_size); memset(P, 0, allocate_size); P->h_tid = tid | GCFLAG_OLD; + assert(!(P->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, P); return P; } From noreply at buildbot.pypy.org Wed Jul 3 17:04:25 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 3 Jul 2013 17:04:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: add _GC_DEBUGPRINTS, comments, and some fairly untested code about tracing h_original in major_collections Message-ID: <20130703150425.6DA241C2FBE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r344:f37e2b89a0fc Date: 2013-07-03 17:03 +0200 http://bitbucket.org/pypy/stmgc/changeset/f37e2b89a0fc/ Log: add _GC_DEBUGPRINTS, comments, and some fairly untested code about tracing h_original in major_collections diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -21,15 +21,16 @@ C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ stmsync.c dbgmem.c fprintcolor.c -DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 +DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 -# note that we don't say -DNDEBUG, so that asserts should still be compiled it +# note that we don't say -DNDEBUG, so that asserts should still be compiled in +# also, all debug code with extra checks but not the debugprints build-%: %.c ${H_FILES} ${C_FILES} stmgc.c - gcc -pthread -O2 -g $< -o build-$* -Wall stmgc.c -lrt + gcc -pthread -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -g $< -o build-$* -Wall stmgc.c -lrt debug-%: %.c ${H_FILES} ${C_FILES} - gcc -pthread ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt + gcc -pthread -DDUMP_EXTRA=1 ${DEBUG} $< -o debug-$* -Wall ${C_FILES} -lrt release-%: %.c ${H_FILES} ${C_FILES} stmgc.c gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -105,6 +105,15 @@ *(to++) = *(from++); } +gcptr allocate_old(size_t size, int tid) +{ + gcptr p = stmgcpage_malloc(size); + memset(p, 0, size); + p->h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER | tid; + p->h_revision = -INT_MAX; + return p; +} + gcptr 
allocate_pseudoprebuilt(size_t size, int tid) { gcptr x = calloc(1, size); @@ -166,6 +175,15 @@ return 0; } +#ifdef _GC_DEBUG +int is_free_old(gcptr p) +{ + fprintf(stdout, "\n=== check ===\n"); + return (!_stm_can_access_memory((char*)p)) + || (p->h_tid == DEBUG_WORD(0xDD)); +} +#endif + void check_not_free(gcptr p) { assert(p != NULL); @@ -179,6 +197,16 @@ if (p != NULL) { check_not_free(p); classify(p); // additional asserts + if (p->h_original && !(p->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + // must point to valid old object + gcptr id = (gcptr)p->h_original; + assert(id->h_tid & GCFLAG_OLD); + check_not_free(id); +#ifdef _GC_DEBUG + if (!is_shared_prebuilt(id) && !(id->h_tid & GCFLAG_PREBUILT)) + assert(!is_free_old(id)); +#endif + } } } diff --git a/c4/fprintcolor.c b/c4/fprintcolor.c --- a/c4/fprintcolor.c +++ b/c4/fprintcolor.c @@ -5,7 +5,7 @@ { va_list ap; -#ifdef _GC_DEBUG +#ifdef _GC_DEBUGPRINTS dprintf(("STM Subsystem: Fatal Error\n")); #else fprintf(stderr, "STM Subsystem: Fatal Error\n"); @@ -19,7 +19,7 @@ } -#ifdef _GC_DEBUG +#ifdef _GC_DEBUGPRINTS static __thread revision_t tcolor = 0; static revision_t tnextid = 0; diff --git a/c4/fprintcolor.h b/c4/fprintcolor.h --- a/c4/fprintcolor.h +++ b/c4/fprintcolor.h @@ -6,7 +6,7 @@ __attribute__((format (printf, 1, 2), noreturn)); -#ifdef _GC_DEBUG +#ifdef _GC_DEBUGPRINTS #define dprintf(args) threadcolor_printf args int dprintfcolor(void); diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -235,6 +235,15 @@ if (!(obj->h_revision & 2)) { /* go visit the more recent version */ obj = (gcptr)obj->h_revision; + if ((gcptr)obj->h_original == prev_obj + && !(prev_obj->h_tid & GCFLAG_VISITED)) { + assert(0); // why never hit? + // prev_obj is the ID copy + prev_obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; + /* see fix_outdated() */ + prev_obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, prev_obj); + } } else { /* it's a stub: keep it if it points to a protected version, @@ -244,7 +253,11 @@ */ assert(obj->h_tid & GCFLAG_STUB); obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC)) { + if (!(obj->h_tid & GCFLAG_PUBLIC) || !(prev_obj->h_original)) { + assert(prev_obj->h_original); // why never hit? + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + /* never?: stub->public where stub is id copy? 
*/ + prev_obj->h_tid |= GCFLAG_VISITED; assert(*pobj == prev_obj); gcptr obj1 = obj; @@ -256,6 +269,9 @@ } if (!(obj->h_revision & 3)) { + /* obj is neither a stub nor a most recent revision: + completely ignore obj->h_revision */ + obj = (gcptr)obj->h_revision; assert(obj->h_tid & GCFLAG_PUBLIC); prev_obj->h_revision = (revision_t)obj; @@ -274,7 +290,21 @@ assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - + + gcptr id_copy = (gcptr)obj->h_original; + if (id_copy && id_copy != B) { + assert(id_copy == (gcptr)B->h_original); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + gcptrlist_insert(&objects_to_trace, id_copy); + } + else { + /* prebuilt originals won't get collected anyway + and if they are not reachable in any other way, + we only ever need their location, not their content */ + } + } + obj->h_tid |= GCFLAG_VISITED; B->h_tid |= GCFLAG_VISITED; assert(!(obj->h_tid & GCFLAG_STUB)); @@ -375,8 +405,24 @@ outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; + + /* Objects that were not visited yet must have the PUB_TO_PRIV + flag. Except if that transaction will abort anyway, then it + may be removed from a previous major collection that didn't + fix the PUB_TO_PRIV because the transaction was going to + abort anyway: + 1. minor_collect before major collect (R->L, R is outdated, abort) + 2. major collect removes flag + 3. major collect again, same thread, no time to abort + 4. flag still removed + */ + assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, + R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); visit_keep(R); if (L != NULL) { + /* minor collection found R->L in public_to_young + and R was modified. It then sets item->val to NULL and wants + to abort later. */ revision_t v = L->h_revision; visit_keep(L); /* a bit of custom logic here: if L->h_revision used to @@ -384,8 +430,10 @@ keep this property, even though visit_keep(L) might decide it would be better to make it point to a more recent copy. */ - if (v == (revision_t)R) + if (v == (revision_t)R) { + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); L->h_revision = v; /* restore */ + } } } G2L_LOOP_END; @@ -448,6 +496,7 @@ just removing it is very wrong --- we want 'd' to abort. */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; } @@ -482,14 +531,16 @@ /* We are now after visiting all objects, and we know the * transaction isn't aborting because of this collection. We have * cleared GCFLAG_PUBLIC_TO_PRIVATE from public objects at the end - * of the chain. Now we have to set it again on public objects that - * have a private copy. + * of the chain (head revisions). Now we have to set it again on + * public objects that have a private copy. 
*/ wlog_t *item; dprintf(("fix public_to_private on thread %p\n", d)); G2L_LOOP_FORWARD(d->public_to_private, item) { + assert(item->addr->h_tid & GCFLAG_VISITED); + assert(item->val->h_tid & GCFLAG_VISITED); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -48,7 +48,8 @@ inbetween the preceeding minor_collect() and this assert (committransaction() -> updatechainheads() -> stub_malloc() -> ...): */ - /* assert(!minor_collect_anything_to_do(d)); */ + assert(!minor_collect_anything_to_do(d) + || d->nursery_current == d->nursery_end); stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); @@ -430,6 +431,7 @@ gcptr P = items[i]; assert(P->h_tid & GCFLAG_PUBLIC); assert(P->h_tid & GCFLAG_OLD); + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -80,6 +80,7 @@ int stm_enter_callback_call(void) { int token = (thread_descriptor == NULL); + dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); DescriptorInit(); @@ -93,6 +94,7 @@ void stm_leave_callback_call(int token) { + dprintf(("leave_callback_call(%d)\n", token)); if (token == 1) stmgc_minor_collect(); /* force everything out of the nursery */ From noreply at buildbot.pypy.org Wed Jul 3 17:19:47 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 17:19:47 +0200 (CEST) Subject: [pypy-commit] pypy default: kill the include_debug_merge_points option and ignore debug_merge_points always Message-ID: <20130703151947.D530C1C2FBE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65170:4eeb5629fb68 Date: 2013-07-03 16:52 +0200 http://bitbucket.org/pypy/pypy/changeset/4eeb5629fb68/ Log: kill the include_debug_merge_points option and ignore debug_merge_points always diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,18 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point': yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk): yield op else: for op in chunk.operations: @@ -162,15 +162,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in 
self._ops_for_chunk(chunk): if op in loop_ops: yield op From noreply at buildbot.pypy.org Wed Jul 3 17:19:49 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 17:19:49 +0200 (CEST) Subject: [pypy-commit] pypy default: add an include_guard_not_invalidated option which defaults to true Message-ID: <20130703151949.6CA7F1C2FBE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65171:a75c05b8580e Date: 2013-07-03 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a75c05b8580e/ Log: add an include_guard_not_invalidated option which defaults to true diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,19 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point': + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: for op in chunk.operations: @@ -162,7 +163,7 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] loop_ops = self.allops(opcode) @@ -170,7 +171,7 @@ opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -223,5 +223,5 @@ log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - ops = loop.ops_by_id('getitem') + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) assert log.opnames(ops) == [] From noreply at buildbot.pypy.org Wed Jul 3 17:19:50 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 3 Jul 2013 17:19:50 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: merge default Message-ID: <20130703151950.CA2FE1C2FBE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65172:0dd60277f779 Date: 2013-07-03 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/0dd60277f779/ Log: merge default diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. 
branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,19 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: for op in chunk.operations: @@ -162,15 +163,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -223,5 +223,5 @@ log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - ops = loop.ops_by_id('getitem') + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) assert log.opnames(ops) == [] From noreply at buildbot.pypy.org Wed Jul 3 20:43:51 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 3 Jul 2013 20:43:51 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hopefully fix a compilation error during translation w/ the jit: forward Message-ID: <20130703184351.32F781C125C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65173:d2d7be13cbad Date: 2013-07-03 11:43 -0700 http://bitbucket.org/pypy/pypy/changeset/d2d7be13cbad/ Log: hopefully fix a compilation error during translation w/ the jit: forward declare the external locale functions diff --git a/pypy/module/_codecs/locale.h b/pypy/module/_codecs/locale.h new file mode 100644 --- /dev/null +++ b/pypy/module/_codecs/locale.h @@ -0,0 +1,7 @@ +#include +#include + +wchar_t* pypy_char2wchar(const char* 
arg, size_t *size); +void pypy_char2wchar_free(wchar_t *text); +char* pypy_wchar2char(const wchar_t *text, size_t *error_pos); +void pypy_wchar2char_free(char *bytes); diff --git a/pypy/module/_codecs/locale.py b/pypy/module/_codecs/locale.py --- a/pypy/module/_codecs/locale.py +++ b/pypy/module/_codecs/locale.py @@ -21,6 +21,7 @@ cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( + includes=[cwd.join('locale.h')], separate_module_files=[cwd.join('locale.c')], export_symbols=['pypy_char2wchar', 'pypy_char2wchar_free', 'pypy_wchar2char', 'pypy_wchar2char_free']) From noreply at buildbot.pypy.org Wed Jul 3 21:22:39 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 3 Jul 2013 21:22:39 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: a branch to allow subtype of ndarray, add tests and start to implement Message-ID: <20130703192239.B2F141C1001@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65174:38c0e3d688bc Date: 2013-07-03 18:31 +0300 http://bitbucket.org/pypy/pypy/changeset/38c0e3d688bc/ Log: a branch to allow subtype of ndarray, add tests and start to implement diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -23,7 +23,7 @@ self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(shape, dtype, order='C', subtype_and_space=(None, None)): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +32,16 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if subtype_and_space[0]: + space = subtype_and_space[1] + subtype = subtype_and_space[0] + ret = space.allocate_instance(W_NDimArray, subtype) + W_NDimArray.__init__(ret, impl) + return ret return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(shape, storage, dtype, order='C', owning=False, subtype_and_space=(None, None)): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,6 +52,13 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if subtype_and_space[0]: + print 'creating subclass',subtype_and_space + space = subtype_and_space[1] + subtype = subtype_and_space[0] + ret = space.allocate_instance(W_NDimArray, subtype) + W_NDimArray.__init__(ret, impl) + return ret return W_NDimArray(impl) @staticmethod diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -914,7 +914,7 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + return W_NDimArray.from_shape_and_storage(shape, storage, dtype, (space, w_cls)) W_NDimArray.typedef = TypeDef( "ndarray", diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1442,7 +1442,7 @@ assert x.view('int8').shape == (10, 3) def test_ndarray_view_empty(self): - from numpypy import 
array, int8, int16, dtype + from numpypy import array, int8, int16 x = array([], dtype=[('a', int8), ('b', int8)]) y = x.view(dtype=int16) @@ -1458,6 +1458,20 @@ skip('not implemented yet') assert s.view('double') < 7e-323 + def test_subclass_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + print 'matix.__new__(',subtype,',',data,'...)' + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + print type(b),b + assert False + assert (b == a).all() + def test_tolist_scalar(self): from numpypy import int32, bool_ x = int32(23) @@ -2871,6 +2885,12 @@ assert y[0, 1] == 2 y[0, 1] = 42 assert x[1] == 42 + class C(ndarray): + pass + z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C) + assert isinstance(z, C) + assert z.shape == (4, 1) + assert z[1, 0] == 42 def test___pypy_data__(self): from numpypy import array From noreply at buildbot.pypy.org Wed Jul 3 21:22:41 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 3 Jul 2013 21:22:41 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: pass a test Message-ID: <20130703192241.0965C1C315F@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65175:2f8ce730591a Date: 2013-07-03 18:48 +0300 http://bitbucket.org/pypy/pypy/changeset/2f8ce730591a/ Log: pass a test diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -903,7 +903,7 @@ return W_NDimArray.from_shape(shape, dtype) @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
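The _from_shape_and_storage hook whose docstring appears just above is exercised at application level by the tests added on this branch. The sketch below only mirrors that usage: _from_shape_and_storage is a PyPy-only detail (the call is therefore left commented out so the snippet also runs on plain NumPy), and obtaining the buffer address through __array_interface__ is an assumption carried over from the surrounding test code, not something introduced by this changeset.

    import numpy as np   # 'numpypy' when running on PyPy

    class C(np.ndarray):                     # hypothetical subclass, as in the test
        pass

    x = np.array([1, 2, 3, 4], dtype=np.int64)
    addr, _ = x.__array_interface__['data']  # raw address of x's buffer

    # PyPy-only hook shown in the diff above: rebuild a view over the same
    # memory, optionally as an instance of a subclass.
    # z = np.ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C)
    # assert isinstance(z, C) and z.shape == (4, 1)
    # z[1, 0] = 42; assert x[1] == 42        # both views share the storage
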
@@ -912,9 +912,14 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype, (space, w_cls)) + if w_subclass: + return W_NDimArray.from_shape_and_storage(shape, storage, dtype, 'C', + False, (w_subclass, space)) + else: + return W_NDimArray.from_shape_and_storage(shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", From noreply at buildbot.pypy.org Wed Jul 3 21:22:42 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 3 Jul 2013 21:22:42 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: pass existing tests Message-ID: <20130703192242.572431C1001@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65176:97487dbdf597 Date: 2013-07-03 22:21 +0300 http://bitbucket.org/pypy/pypy/changeset/97487dbdf597/ Log: pass existing tests diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -53,7 +53,6 @@ impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) if subtype_and_space[0]: - print 'creating subclass',subtype_and_space space = subtype_and_space[1] subtype = subtype_and_space[0] ret = space.allocate_instance(W_NDimArray, subtype) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -619,9 +619,9 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if w_type is not None: - raise OperationError(space.w_NotImplementedError, space.wrap( - "view(... 
type=) not implemented yet")) + if not w_type and w_dtype and w_dtype.issubtype(space.gettypefor(W_NDimArray)): + w_type = w_dtype + w_dtype = None if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -651,7 +651,12 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - return W_NDimArray(impl.get_view(self, dtype, new_shape)) + v = impl.get_view(self, dtype, new_shape) + if w_type is not None: + ret = space.allocate_instance(W_NDimArray, w_type) + W_NDimArray.__init__(ret, v) + return ret + return W_NDimArray(v) # --------------------- operations ---------------------------- @@ -887,20 +892,22 @@ self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation - at unwrap_spec(offset=int) + at unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, w_order=None): + offset=0, w_strides=None, order='C'): if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_order) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) + print 'desc_new_array(space,',w_subtype,',',shape,',',dtype,'...)' if not shape: return W_NDimArray.new_scalar(space, dtype) - return W_NDimArray.from_shape(shape, dtype) + if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): + return W_NDimArray.from_shape(shape, dtype, order) + return W_NDimArray.from_shape(shape, dtype, order, (w_subtype, space)) @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1458,20 +1458,32 @@ skip('not implemented yet') assert s.view('double') < 7e-323 - def test_subclass_view(self): + def test_subtype_view(self): from numpypy import ndarray, array class matrix(ndarray): def __new__(subtype, data, dtype=None, copy=True): - print 'matix.__new__(',subtype,',',data,'...)' if isinstance(data, matrix): return data return data.view(subtype) a = array(range(5)) b = matrix(a) - print type(b),b - assert False + assert isinstance(b, matrix) assert (b == a).all() + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + def test_tolist_scalar(self): from numpypy import int32, bool_ x = int32(23) @@ -2905,7 +2917,7 @@ class AppTestLongDoubleDtypes(BaseNumpyAppTest): def setup_class(cls): from pypy.module.micronumpy import Module - print dir(Module.interpleveldefs) + #print dir(Module.interpleveldefs) if not Module.interpleveldefs.get('longfloat', None): py.test.skip('no longdouble types yet') BaseNumpyAppTest.setup_class.im_func(cls) From noreply at 
buildbot.pypy.org Wed Jul 3 21:38:00 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 3 Jul 2013 21:38:00 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: a failing test Message-ID: <20130703193800.9A1B21C315F@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65177:ef040bfffe60 Date: 2013-07-03 22:37 +0300 http://bitbucket.org/pypy/pypy/changeset/ef040bfffe60/ Log: a failing test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1483,6 +1483,12 @@ assert a.shape == (2, 2) assert a.dtype is dtype(int) assert a.id == 'subtype' + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 def test_tolist_scalar(self): from numpypy import int32, bool_ From noreply at buildbot.pypy.org Wed Jul 3 22:16:01 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 3 Jul 2013 22:16:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20130703201601.22EAD1C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65178:5d231843e448 Date: 2013-07-03 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/5d231843e448/ Log: 2to3 diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -53,7 +53,7 @@ assert self.uses_strategy('IdentitySetStrategy',set([X(),X()])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) - assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) def test_identity_strategy_add(self): @@ -125,11 +125,11 @@ assert s.intersection(set(['a','b','c'])) == set() assert s.intersection(set([X(),X()])) == set() - other = set(['a','b','c',s.__iter__().next()]) + other = set(['a','b','c',next(s.__iter__())]) intersect = s.intersection(other) assert len(intersect) == 1 - assert intersect.__iter__().next() in s - assert intersect.__iter__().next() in other + assert next(intersect.__iter__()) in s + assert next(intersect.__iter__()) in other def test_class_monkey_patch(self): @@ -145,7 +145,7 @@ assert not self.uses_strategy('IdentitySetStrategy',s) assert not self.uses_strategy('IdentitySetStrategy',set([X(),X()])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) - assert not self.uses_strategy('IdentitySetStrategy',set([X(),u""])) + assert not self.uses_strategy('IdentitySetStrategy',set([X(),""])) assert not self.uses_strategy('IdentitySetStrategy',set([X(),1])) # An interesting case, add an instance, mutate the class, @@ -161,7 +161,7 @@ s.add(inst) assert len(s) == 1 - assert s.__iter__().next() is inst + assert next(s.__iter__()) is inst assert not self.uses_strategy('IdentitySetStrategy',s) From noreply at buildbot.pypy.org Wed Jul 3 22:16:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 3 Jul 2013 22:16:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: type can inherit object's __eq__ now Message-ID: <20130703201602.957091C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65179:983d7aa0f41d Date: 2013-07-03 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/983d7aa0f41d/ Log: type can inherit object's __eq__ now diff --git 
a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1145,9 +1145,6 @@ "type object '%s' has no attribute '%s'", w_type.name, name) -def eq__Type_Type(space, w_self, w_other): - return space.is_(w_self, w_other) - # ____________________________________________________________ From noreply at buildbot.pypy.org Thu Jul 4 01:42:29 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 4 Jul 2013 01:42:29 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: helper function to create subtypes (when needed) without calling their __new__ (amaury) Message-ID: <20130703234229.BDC4D1C00B1@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65180:3e11268b64cf Date: 2013-07-04 02:41 +0300 http://bitbucket.org/pypy/pypy/changeset/3e11268b64cf/ Log: helper function to create subtypes (when needed) without calling their __new__ (amaury) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,14 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, cls, impl): + if space.is_w(space.type(cls), space.gettypefor(W_NDimArray)): + ret = W_NDimArray(impl) + else: + ret = space.allocate_instance(W_NDimArray, space.type(cls)) + print 'created',space.type(ret) + W_NDimArray.__init__(ret, impl) + return ret class ArrayArgumentException(Exception): pass diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w + ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -298,7 +298,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return W_NDimArray(new_impl) + return wrap_impl(space, self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -902,7 +902,6 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - print 'desc_new_array(space,',w_subtype,',',shape,',',dtype,'...)' if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1476,6 +1476,7 @@ def __new__(subtype, shape, dtype): self = ndarray.__new__(subtype, shape, dtype) self.id = 'subtype' + print 'called new' return self a = C([2, 2], int) assert isinstance(a, C) @@ -1483,6 +1484,14 @@ assert a.shape == (2, 2) assert a.dtype is dtype(int) assert a.id == 'subtype' + print '1' + a = a.reshape(1, 4) + print '2' + b = a.reshape(4, 1) + print '3' + assert isinstance(b, C) + #make 
sure __new__ was not called + assert not getattr(b, 'id', None) a.fill(3) b = a[0] assert isinstance(b, C) From noreply at buildbot.pypy.org Thu Jul 4 08:10:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 4 Jul 2013 08:10:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Update the FAQ about the GIL and STM. Message-ID: <20130704061036.798681C1001@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65181:d0f456167591 Date: 2013-07-04 08:09 +0200 http://bitbucket.org/pypy/pypy/changeset/d0f456167591/ Log: Update the FAQ about the GIL and STM. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? 
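The ndarray-subtype changesets above (r65174 to r65177 and r65180) make view(), reshape() and the _from_shape_and_storage hook return instances of user-defined ndarray subclasses, creating them with space.allocate_instance() instead of re-running the subclass's __new__. The behaviour being reproduced is ordinary NumPy subclassing; a minimal standalone sketch (the Matrix class is invented here, modelled on the matrix helper in test_subtype_view above):

    import numpy as np

    class Matrix(np.ndarray):                      # no extra state, no __array_finalize__
        def __new__(subtype, data):
            if isinstance(data, Matrix):
                return data
            return np.asarray(data).view(subtype)  # view() must hand back the subtype

    a = np.arange(5)
    b = Matrix(a)
    assert isinstance(b, Matrix)
    assert (b == a).all()

    # reshape() also returns the subclass, but without calling Matrix.__new__
    # again; test_subtype_base above checks the same property via its 'id' attribute.
    c = b.reshape(5, 1)
    assert isinstance(c, Matrix)
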
From noreply at buildbot.pypy.org Thu Jul 4 10:27:22 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 4 Jul 2013 10:27:22 +0200 (CEST) Subject: [pypy-commit] stmgc default: refactor keeping alive the h_originals in major collections Message-ID: <20130704082722.BFACB1C1014@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r345:7ae6aa7d16af Date: 2013-07-04 09:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/7ae6aa7d16af/ Log: refactor keeping alive the h_originals in major collections diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -212,6 +212,24 @@ static struct GcPtrList objects_to_trace; +static void keep_original_alive(gcptr obj) +{ + /* keep alive the original of a visited object */ + gcptr id_copy = (gcptr)obj->h_original; + /* prebuilt original objects may have a predifined + hash in h_original */ + if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + gcptrlist_insert(&objects_to_trace, id_copy); + } + else { + /* prebuilt originals won't get collected anyway + and if they are not reachable in any other way, + we only ever need their location, not their content */ + } + } +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -226,6 +244,8 @@ obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; gcptrlist_insert(&objects_to_trace, obj); + + keep_original_alive(obj); } } else if (obj->h_tid & GCFLAG_PUBLIC) { @@ -235,15 +255,6 @@ if (!(obj->h_revision & 2)) { /* go visit the more recent version */ obj = (gcptr)obj->h_revision; - if ((gcptr)obj->h_original == prev_obj - && !(prev_obj->h_tid & GCFLAG_VISITED)) { - assert(0); // why never hit? - // prev_obj is the ID copy - prev_obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - /* see fix_outdated() */ - prev_obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, prev_obj); - } } else { /* it's a stub: keep it if it points to a protected version, @@ -253,12 +264,10 @@ */ assert(obj->h_tid & GCFLAG_STUB); obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC) || !(prev_obj->h_original)) { - assert(prev_obj->h_original); // why never hit? - assert(!(obj->h_tid & GCFLAG_PUBLIC)); - /* never?: stub->public where stub is id copy? 
*/ + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + prev_obj->h_tid |= GCFLAG_VISITED; + keep_original_alive(prev_obj); - prev_obj->h_tid |= GCFLAG_VISITED; assert(*pobj == prev_obj); gcptr obj1 = obj; visit(&obj1); /* recursion, but should be only once */ @@ -291,18 +300,11 @@ gcptr B = (gcptr)obj->h_revision; assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - gcptr id_copy = (gcptr)obj->h_original; - if (id_copy && id_copy != B) { - assert(id_copy == (gcptr)B->h_original); + if (obj->h_original && (gcptr)obj->h_original != B) { + /* if B is original, it will be visited anyway */ + assert(obj->h_original == B->h_original); assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - gcptrlist_insert(&objects_to_trace, id_copy); - } - else { - /* prebuilt originals won't get collected anyway - and if they are not reachable in any other way, - we only ever need their location, not their content */ - } + keep_original_alive(obj); } obj->h_tid |= GCFLAG_VISITED; @@ -323,6 +325,7 @@ } } + static void visit_keep(gcptr obj) { if (!(obj->h_tid & GCFLAG_VISITED)) { @@ -334,6 +337,7 @@ assert(!(obj->h_revision & 2)); visit((gcptr *)&obj->h_revision); } + keep_original_alive(obj); } } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -266,7 +266,7 @@ return fresh_old_copy; } -inline void copy_to_old_id_copy(gcptr obj, gcptr id) +void copy_to_old_id_copy(gcptr obj, gcptr id) { assert(!is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -1,7 +1,7 @@ #include "stmimpl.h" -inline void copy_to_old_id_copy(gcptr obj, gcptr id); +void copy_to_old_id_copy(gcptr obj, gcptr id); gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { From noreply at buildbot.pypy.org Thu Jul 4 10:27:24 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 4 Jul 2013 10:27:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: add test and fix Message-ID: <20130704082724.33A4A1C1014@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r346:38fcfb8212e2 Date: 2013-07-04 10:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/38fcfb8212e2/ Log: add test and fix diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -220,6 +220,11 @@ hash in h_original */ if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; + /* see fix_outdated() */ + id_copy->h_tid |= GCFLAG_VISITED; + + /* XXX: may not always need tracing? 
*/ gcptrlist_insert(&objects_to_trace, id_copy); } else { diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -298,6 +298,8 @@ undef_macros=['NDEBUG'], define_macros=[('GC_NURSERY', str(16 * WORD)), ('_GC_DEBUG', '2'), + ('_GC_DEBUGPRINTS', '1'), + ('DUMP_EXTRA', '1'), ('GC_PAGE_SIZE', '1000'), ('GC_MIN', '200000'), ('GC_EXPAND', '90000'), @@ -576,6 +578,13 @@ if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: lib.stm_add_prebuilt_root(p1) +def delegate_original(p1, p2): + assert p1.h_original == 0 + assert p2.h_original == 0 + assert p1 != p2 + p2.h_original = ffi.cast("revision_t", p1) + + def make_public(p1): """Hack at an object returned by oalloc() to force it public.""" assert classify(p1) == "protected" diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -200,6 +200,20 @@ assert p3 != p2 assert p3 == lib.stm_write_barrier(p2) +def test_new_version_id_alive(): + p1 = oalloc(HDR); make_public(p1) + p2 = oalloc(HDR); make_public(p2) + delegate(p1, p2) + delegate_original(p1, p2) + p2.h_original = ffi.cast("revision_t", p1) + lib.stm_push_root(p1) + major_collect() + major_collect() + p1b = lib.stm_pop_root() + check_not_free(p1) # id copy + check_not_free(p2) + + def test_new_version_kill_intermediate(): p1 = oalloc(HDR); make_public(p1) p2 = oalloc(HDR); make_public(p2) @@ -249,6 +263,34 @@ print 'p5:', p5 assert rawgetptr(p1, 0) == p5 +def test_new_version_not_kill_intermediate_original(): + p1 = oalloc_refs(1); make_public(p1) + p2 = oalloc(HDR); make_public(p2) + p3 = oalloc(HDR); make_public(p3) + p4 = oalloc(HDR); make_public(p4) + p5 = oalloc(HDR); make_public(p5) + delegate(p2, p3) + delegate(p3, p4) + delegate(p4, p5) + rawsetptr(p1, 0, p3) + delegate_original(p3, p1) + delegate_original(p3, p2) + delegate_original(p3, p4) + delegate_original(p3, p5) + + lib.stm_push_root(p1) + major_collect() + lib.stm_pop_root() + check_not_free(p1) + check_free_old(p2) + check_not_free(p3) # original + check_free_old(p4) + check_not_free(p5) + assert rawgetptr(p1, 0) == p5 + assert follow_original(p1) == p3 + assert follow_original(p5) == p3 + + def test_prebuilt_version_1(): p1 = lib.pseudoprebuilt(HDR, 42 + HDR) check_prebuilt(p1) From noreply at buildbot.pypy.org Thu Jul 4 11:09:58 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 4 Jul 2013 11:09:58 +0200 (CEST) Subject: [pypy-commit] pypy default: fix failing test Message-ID: <20130704090958.6C0F51C0130@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65182:7617f7f1432c Date: 2013-07-04 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7617f7f1432c/ Log: fix failing test diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, ConstPtr(ptr70), descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) 
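Among the py3k changesets further above, the 2to3 pass (r65178) mechanically rewrites s.__iter__().next() into next(s.__iter__()) and drops the u'' string prefixes. The reason is the renamed iterator protocol in Python 3; a standalone illustration that runs on both 2.6+ and 3.x:

    s = {1, 2, 3}
    it = iter(s)

    # Python 2 spelling, removed by the 2to3 pass:
    #     first = it.next()
    # Python 3 reaches the renamed __next__ method through the builtin:
    first = next(it)

    assert first in s
    assert hasattr(it, '__next__') or hasattr(it, 'next')   # py3 vs py2 iterators
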
From noreply at buildbot.pypy.org Thu Jul 4 13:20:47 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 4 Jul 2013 13:20:47 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: merge default Message-ID: <20130704112047.8BFD31C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65183:323dac73b895 Date: 2013-07-04 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/323dac73b895/ Log: merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, ConstPtr(ptr70), descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) 
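The FAQ entry added and merged above describes atomic sections only informally, and none of these changesets names a user-facing API for them. As a purely conceptual sketch (the atomic context manager below is invented for illustration and is not the pypy-stm interface), an atomic section simply promises that the enclosed block behaves as if no other thread runs in the middle, which a lock-based stand-in can fake:

    import threading

    _big_lock = threading.Lock()    # stand-in for "do not release the GIL here"

    class atomic(object):
        """Invented for this sketch: under STM (or with the GIL held throughout)
        the body of the with-block appears to execute indivisibly."""
        def __enter__(self):
            _big_lock.acquire()
        def __exit__(self, *exc):
            _big_lock.release()

    counter = 0

    def add(n):
        global counter
        with atomic():              # an "atomic section" in the FAQ's sense
            counter += n            # read-modify-write, safe against interleaving

    threads = [threading.Thread(target=add, args=(1,)) for _ in range(4)]
    for t in threads: t.start()
    for t in threads: t.join()
    assert counter == 4
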
From noreply at buildbot.pypy.org Thu Jul 4 13:55:33 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 4 Jul 2013 13:55:33 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix also the duhton target Message-ID: <20130704115533.14B521C3124@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r347:7a86a8b3cbb1 Date: 2013-07-04 13:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/7a86a8b3cbb1/ Log: fix also the duhton target diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -2,10 +2,10 @@ all: duhton_debug duhton duhton: *.c *.h ../c4/*.c ../c4/*.h - gcc -lrt -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall + gcc -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall -lrt duhton_debug: *.c *.h ../c4/*.c ../c4/*.h - gcc -lrt -pthread -g -DDu_DEBUG -D_GC_DEBUG=2 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall + gcc -pthread -g -DDu_DEBUG -D_GC_DEBUG=2 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt clean: rm -f duhton duhton_debug From noreply at buildbot.pypy.org Thu Jul 4 14:19:35 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 4 Jul 2013 14:19:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Make some use of command-line parameters. Very ad-hoc, but works for now Message-ID: <20130704121935.2AC9F1C1001@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r348:2d5fc7e0a9c0 Date: 2013-07-04 14:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/2d5fc7e0a9c0/ Log: Make some use of command-line parameters. Very ad-hoc, but works for now diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -1,20 +1,40 @@ #include "duhton.h" +#define DEFAULT_NUM_THREADS 4 int main(int argc, char **argv) { - char *filename; - int interactive; - if (argc <= 1) { + char *filename = NULL; + int interactive = 1; + int i; + int num_threads = DEFAULT_NUM_THREADS; + + for (i = 1; i < argc; ++i) { + if (strcmp(argv[i], "--help") == 0) { + printf("Duhton: a simple lisp-like language with STM support\n\n"); + printf("Usage: duhton [--help] [--num-threads no] [filename]\n"); + printf(" --help: this help\n"); + printf(" --num-threads : number of threads (default 4)\n\n"); + exit(0); + } else if (strcmp(argv[i], "--num-threads") == 0) { + if (i == argc - 1) { + printf("ERROR: --num-threads requires a parameter\n"); + exit(1); + } + num_threads = atoi(argv[i + 1]); + i++; + } else if (strncmp(argv[i], "--", 2) == 0) { + printf("ERROR: unrecognized parameter %s\n", argv[i]); + } else { + filename = argv[i]; + interactive = 0; + } + } + if (!filename) { filename = "-"; /* stdin */ - interactive = 1; - } - else { - filename = argv[1]; - interactive = 0; - } + } - Du_Initialize(); + Du_Initialize(num_threads); while (1) { if (interactive) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -138,7 +138,7 @@ DuObject *rest, int execute_now); DuObject *_Du_GetGlobals(void); -void Du_Initialize(void); +void Du_Initialize(int); void Du_Finalize(void); #define Du_Globals (_Du_GetGlobals()) @@ -182,5 +182,7 @@ } #endif +extern pthread_t *all_threads; +extern int all_threads_count; #endif /* _DUHTON_H_ */ diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -1,6 +1,8 @@ #include "duhton.h" #include +pthread_t *all_threads; +int all_threads_count; static void _du_getargs1(const char *name, DuObject *cons, DuObject *locals, DuObject **a) @@ -561,9 +563,11 @@ return Du_None; } -void 
Du_Initialize(void) +void Du_Initialize(int num_threads) { stm_initialize(); + all_threads_count = num_threads; + all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -2,10 +2,6 @@ #include #include -#ifndef NUM_THREADS -#define NUM_THREADS 4 -#endif - static DuConsObject du_pending_transactions = { DuOBJECT_HEAD_INIT(DUTYPE_CONS), @@ -21,15 +17,13 @@ static void run_all_threads(void) { int i; - pthread_t th[NUM_THREADS]; - - for (i = 0; i < NUM_THREADS; i++) { - int status = pthread_create(&th[i], NULL, run_thread, NULL); + for (i = 0; i < all_threads_count; i++) { + int status = pthread_create(&all_threads[i], NULL, run_thread, NULL); if (status != 0) stm_fatalerror("status != 0\n"); } - for (i = 0; i < NUM_THREADS; i++) { - pthread_join(th[i], NULL); + for (i = 0; i < all_threads_count; i++) { + pthread_join(all_threads[i], NULL); } } @@ -88,13 +82,13 @@ else { /* nothing to do, wait */ thread_sleeping++; - if (thread_sleeping == NUM_THREADS) { + if (thread_sleeping == all_threads_count) { pthread_mutex_unlock(&mutex_sleep); } stm_commit_transaction(); pthread_mutex_lock(&mutex_sleep); stm_begin_inevitable_transaction(); - if (thread_sleeping == NUM_THREADS) { + if (thread_sleeping == all_threads_count) { pthread_mutex_unlock(&mutex_sleep); return NULL; } From noreply at buildbot.pypy.org Thu Jul 4 21:32:10 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 4 Jul 2013 21:32:10 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: patch bomb for passing space into all calls of from_shape...() while noting which methods must be tested for subtypes Message-ID: <20130704193210.3D8891C0130@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65184:fd71a983d387 Date: 2013-07-04 22:18 +0300 http://bitbucket.org/pypy/pypy/changeset/fd71a983d387/ Log: patch bomb for passing space into all calls of from_shape...() while noting which methods must be tested for subtypes diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, 
shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -31,7 +31,7 @@ self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C', subtype_and_space=(None, None)): + def from_shape(space, shape, dtype, order='C', subtype=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -40,16 +40,14 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) - if subtype_and_space[0]: - space = subtype_and_space[1] - subtype = subtype_and_space[0] + if subtype: ret = space.allocate_instance(W_NDimArray, subtype) W_NDimArray.__init__(ret, impl) return ret return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False, subtype_and_space=(None, None)): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -60,21 +58,19 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, 
order, strides, backstrides, storage) - if subtype_and_space[0]: - space = subtype_and_space[1] - subtype = subtype_and_space[0] + if subtype: ret = space.allocate_instance(W_NDimArray, subtype) W_NDimArray.__init__(ret, impl) return ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, subtype=arr) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -131,7 +131,8 @@ arr.get_dtype()) if _axis < 0 or len(arr.get_shape()) <= _axis: raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) - res = W_NDimArray.from_shape(shape, dtype, 'C') + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: @@ -139,7 +140,7 @@ continue chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, arr.get_shape()[_axis]) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) axis_start += arr.get_shape()[_axis] return res @@ -150,21 +151,21 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(res).implementation.setslice(space, arr) + orig_size)]).apply(space, res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) return res def count_nonzero(space, w_obj): @@ -261,7 +262,7 @@ else: shape = (shape[:axis2] + shape[axis2 + 1:axis1] + shape[axis1 + 1:] + [size]) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) if size == 0: return out if shapelen == 2: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,7 +41,7 @@ dtype = w_arr_list[0].get_dtype() for w_arr in w_arr_list[1:]: dtype = find_binop_result_dtype(space, dtype, 
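    # Illustrative sketch, not part of the patch: the application-level
    # behaviour the subtype plumbing above aims for.  where() and repeat()
    # now build their result with subtype=arr, while the new comment in
    # concatenate() says it always returns a plain ndarray.  Written against
    # standard numpy purely to show the intended types.
    import numpy as np

    class MyArray(np.ndarray):
        pass

    a = np.arange(4).view(MyArray)
    print type(np.where(a > 1, a, a))       # branch intent: MyArray
    print type(a.repeat(2))                 # branch intent: MyArray
    print type(np.concatenate([a, a]))      # per the new comment: plain ndarray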
w_arr.get_dtype()) - out = base.W_NDimArray.from_shape(shape, dtype) + out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -64,8 +64,8 @@ base_iter.next_skip_x(start) if length == 1: return base_iter.getitem() - res = W_NDimArray.from_shape([length], base.get_dtype(), - base.get_order()) + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), subtype=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,7 +85,7 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(res_shape, self.get_dtype()) + res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), subtype=self) return loop.getitem_filter(res, self, arr) def setitem_filter(self, space, idx, val): @@ -145,9 +145,10 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return chunks.apply(self) + return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(shape, self.get_dtype(), self.get_order()) + res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), subtype=self) if not res.get_size(): return res return loop.getitem_array_int(space, self, res, iter_shape, indexes, @@ -161,7 +162,7 @@ # w_index is a list of slices w_value = convert_to_array(space, w_value) chunks = self.implementation._prepare_slice_args(space, w_index) - view = chunks.apply(self) + view = chunks.apply(space, self) view.implementation.setslice(space, w_value) return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, @@ -259,14 +260,14 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return W_NDimArray(self.implementation.copy(space)) + return wrap_impl(space, self, self.implementation.copy(space)) def descr_get_real(self, space): - return W_NDimArray(self.implementation.get_real(self)) + return wrap_impl(space, self, self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return W_NDimArray(ret) + return wrap_impl(space, self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -326,7 +327,7 @@ """ if self.is_scalar(): return self - return self.implementation.swapaxes(self, axis1, axis2) + return self.implementation.swapaxes(space, self, axis1, axis2) def descr_tolist(self, space): if len(self.get_shape()) == 0: @@ -446,17 +447,25 @@ # we must do that, because we need a working set. otherwise # we would modify the array in-place. Use this to our advantage # by converting nonnative byte order. 
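    # The argsort() path below copies the array to a contiguous, native-byte-
    # order dtype and sorts the copy; the resulting indices are equally valid
    # for the original array, since changing byte order does not change the
    # values.  Plain-numpy illustration (assumption: little-endian host, so
    # '>i4' is the non-native order here).
    import numpy as np

    a = np.array([30, 10, 20], dtype='>i4')
    native = a.astype(a.dtype.newbyteorder('='))
    print a.argsort()        # [1 2 0]
    print native.argsort()   # same indices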
+ if self.is_scalar(): + return space.wrap(0) s = self.get_dtype().name if not self.get_dtype().native: s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) + assert isinstance(contig, W_NDimArray) return contig.implementation.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return self.implementation.astype(space, dtype) + impl = self.implementation + if isinstance(impl, scalar.Scalar): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -471,7 +480,7 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(self.get_shape(), self.get_dtype()) + res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), subtype=self) loop.byteswap(self.implementation, res.implementation) return res @@ -564,7 +573,7 @@ if space.is_none(w_out): if self.get_dtype().is_bool_type(): #numpy promotes bool.round() to float16. Go figure. - w_out = W_NDimArray.from_shape(self.get_shape(), + w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) else: w_out = None @@ -765,7 +774,7 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(out_shape, dtype) + result = W_NDimArray.from_shape(space, out_shape, dtype, subtype=self) # This is the place to add fpypy and blas return loop.multidim_dot(space, self, other, result, dtype, other_critical_dim) @@ -889,7 +898,10 @@ isfortran = space.getitem(w_state, space.wrap(3)) storage = space.getitem(w_state, space.wrap(4)) - self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + self.implementation = W_NDimArray.from_shape_and_storage(space, + [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation @unwrap_spec(offset=int, order=str) @@ -905,8 +917,8 @@ if not shape: return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): - return W_NDimArray.from_shape(shape, dtype, order) - return W_NDimArray.from_shape(shape, dtype, order, (w_subtype, space)) + return W_NDimArray.from_shape(space, shape, dtype, order) + return W_NDimArray.from_shape(space, shape, dtype, order, w_subtype) @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): @@ -922,10 +934,10 @@ w_dtype)) shape = _find_shape(space, w_shape, dtype) if w_subclass: - return W_NDimArray.from_shape_and_storage(shape, storage, dtype, 'C', - False, (w_subclass, space)) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + 'C', False, w_subclass) else: - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", @@ -1105,7 +1117,7 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape 
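    # The ndmin handling just above pads the shape with leading 1s; under
    # plain numpy the observable effect is:
    import numpy as np
    print np.array([1, 2, 3], ndmin=3).shape    # (1, 1, 3)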
- arr = W_NDimArray.from_shape(shape, dtype, order=order) + arr = W_NDimArray.from_shape(space, shape, dtype, order=order) arr_iter = arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) @@ -1120,7 +1132,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order)) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1130,7 +1142,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order) + arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) arr.fill(one) return space.wrap(arr) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -50,7 +50,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([num_items], dtype=dtype) + a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) ai = a.create_iter() for val in items: ai.setitem(val) @@ -71,7 +71,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([count], dtype=dtype) + a = W_NDimArray.from_shape(space, [count], dtype=dtype) loop.fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -181,7 +181,8 @@ temp_shape = obj_shape[:axis] + obj_shape[axis + 1:] if out: dtype = out.get_dtype() - temp = W_NDimArray.from_shape(temp_shape, dtype) + temp = W_NDimArray.from_shape(space, temp_shape, dtype, + subtype=self) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -207,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, subtype=self) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -216,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape([obj.get_size()], dtype) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, subtype=self) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out @@ -295,7 +296,7 @@ return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - return loop.call1(shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) @@ -370,7 +371,7 @@ return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - return loop.call2(new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) @@ -450,7 +451,7 @@ return dt2 return dt1 return dt2 - else: + else: # increase to the next signed type dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] diff --git 
a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -58,11 +58,11 @@ def __init__(self, name): self.name = name - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(arr.start + ofs, arr.get_strides(), + return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), arr.get_backstrides(), arr.shape, arr, orig_arr, subdtype) @@ -81,13 +81,13 @@ assert s >= 0 return shape[:] + old_shape[s:] - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation shape = self.extend_shape(arr.shape) r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), self.l) _, start, strides, backstrides = r - return W_NDimArray.new_slice(start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, orig_arr) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,9 +19,9 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) -def call2(shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): +def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -48,9 +48,9 @@ reds = ['shape', 'w_obj', 'out', 'obj_iter', 'out_iter']) -def call1(shape, func, calc_dtype, res_dtype, w_obj, out): +def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, subtype=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) @@ -437,7 +437,7 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape([1], arr.get_dtype(), order='C') + res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), res_str.implementation.get_storage_as_int(space)) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -0,0 +1,72 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestSupport(BaseNumpyAppTest): + def setup_class(cls): + from numpypy import ndarray + BaseNumpyAppTest.setup_class.im_func(cls) + class NoNew(ndarray): + def __new__(cls): + raise ValueError('should not call __new__') + def __array_finalize(self, obj): + self.called_finalize = True + class SubType(ndarray): + def __new__(cls): + cls.called_new = True + return cls + def __array_finalize(self, obj): + self.called_finalize = True + cls.w_NoNew = cls.space.wrap(NoNew) + cls.w_SubType = cls.space.wrap(SubType) + + def test_sub_where(self): + from numpypy import where, ones, zeros, array + a = array([1, 2, 3, 0, -3]) + v = a.view(self.NoNew) + assert False + + def test_repeat(self): + assert False + 
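    # Sketch only (an assumption, not lines from this commit) of how one of
    # the placeholder tests above could be filled in: getitem_filter() now
    # builds its result with subtype=self, so boolean-mask indexing should
    # hand back the ndarray subclass.  Relies on self.NoNew from setup_class.
    def test_getitem_filter_sketch(self):
        from numpypy import array
        a = array([1, 2, 3, 0, -3]).view(self.NoNew)
        b = a[a > 0]
        assert isinstance(b, self.NoNew)
        assert (b == array([1, 2, 3])).all()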
+ def test_flatiter(self): + assert False + + def test_getitem_filter(self): + assert False + + def test_getitem_array_int(self): + assert False + + def test_round(self): + from numpypy import array + a = array(range(10), dtype=float).view(self.NoNew) + # numpy compatibility + b = a.round(decimal=0) + assert isinstance(b, self.NoNew) + b = a.round(decimal=1) + assert not isinstance(b, self.NoNew) + b = a.round(decimal=-1) + assert not isinstance(b, self.NoNew) + + def test_dot(self): + # the returned type is that of the first argument + assert False + + def test_reduce(self): + # i.e. sum, max + # test for out as well + assert False + + def test_call2(self): + # c + a vs. a + c, what about array priority? + assert False + + def test_call1(self): + assert False + + def test_astype(self): + assert False + + def test_reshape(self): + assert False From noreply at buildbot.pypy.org Thu Jul 4 22:52:53 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 4 Jul 2013 22:52:53 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fix tests, fix obvious mistakes Message-ID: <20130704205253.411831C313A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65185:6a19e19e18a7 Date: 2013-07-04 23:21 +0300 http://bitbucket.org/pypy/pypy/changeset/6a19e19e18a7/ Log: fix tests, fix obvious mistakes diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -41,7 +41,7 @@ impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) if subtype: - ret = space.allocate_instance(W_NDimArray, subtype) + ret = space.allocate_instance(W_NDimArray, space.type(subtype)) W_NDimArray.__init__(ret, impl) return ret return W_NDimArray(impl) @@ -59,7 +59,7 @@ impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) if subtype: - ret = space.allocate_instance(W_NDimArray, subtype) + ret = space.allocate_instance(W_NDimArray, space.type(subtype)) W_NDimArray.__init__(ret, impl) return ret return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -454,8 +454,7 @@ s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) - assert isinstance(contig, W_NDimArray) - return contig.implementation.argsort(space, w_axis) + return contig.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -182,7 +182,7 @@ if out: dtype = out.get_dtype() temp = W_NDimArray.from_shape(space, temp_shape, dtype, - subtype=self) + subtype=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -208,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(space, shape, dtype, subtype=self) + out = W_NDimArray.from_shape(space, shape, dtype, subtype=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -217,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, subtype=self) + out = 
W_NDimArray.from_shape(space, [obj.get_size()], dtype, subtype=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,8 +24,8 @@ def get_size(self): return 1 -def create_slice(a, chunks): - return Chunks(chunks).apply(W_NDimArray(a)).implementation +def create_slice(space, a, chunks): + return Chunks(chunks).apply(space, W_NDimArray(a)).implementation def create_array(*args, **kwargs): @@ -46,100 +46,100 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] - a = create_array([1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 
1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -207,7 +207,8 @@ raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) # dtypes = get_dtype_cache(self.space) - w_array = W_NDimArray.from_shape_and_storage([2, 2], storage, dtypes.w_int8dtype) + w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2], + storage, dtypes.w_int8dtype) def get(i, j): return w_array.getitem(self.space, [i, j]).value assert get(0, 0) == 0 From noreply at buildbot.pypy.org Thu Jul 4 22:52:54 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 4 Jul 2013 22:52:54 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fix for existing tests Message-ID: <20130704205254.D12801C313A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65186:9c65421e84ec Date: 2013-07-04 23:52 +0300 http://bitbucket.org/pypy/pypy/changeset/9c65421e84ec/ Log: fix for existing tests diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -41,10 +41,16 @@ impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) if subtype: - ret = space.allocate_instance(W_NDimArray, space.type(subtype)) + if space.isinstance_w(subtype, space.w_type): + #got type, probably from descr_XXX + ret = space.allocate_instance(W_NDimArray, subtype) + else: + #got instance + ret = space.allocate_instance(W_NDimArray, space.type(subtype)) W_NDimArray.__init__(ret, impl) - 
return ret - return W_NDimArray(impl) + else: + ret = W_NDimArray(impl) + return ret @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, subtype=None): @@ -59,7 +65,12 @@ impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) if subtype: - ret = space.allocate_instance(W_NDimArray, space.type(subtype)) + if space.isinstance_w(subtype, space.w_type): + #got type, probably from descr_XXX + ret = space.allocate_instance(W_NDimArray, subtype) + else: + #got instance + ret = space.allocate_instance(W_NDimArray, space.type(subtype)) W_NDimArray.__init__(ret, impl) return ret return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -627,9 +627,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if not w_type and w_dtype and w_dtype.issubtype(space.gettypefor(W_NDimArray)): - w_type = w_dtype - w_dtype = None + if not w_type and w_dtype: + try: + if w_dtype.issubtype(space.gettypefor(W_NDimArray)): + w_type = w_dtype + w_dtype = None + except: + pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), From noreply at buildbot.pypy.org Fri Jul 5 00:04:44 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 5 Jul 2013 00:04:44 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: first pass at differentiating types from instances and handling __array_finalize__ Message-ID: <20130704220444.D2F3A1C34F4@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65187:31b9c2717719 Date: 2013-07-05 01:03 +0300 http://bitbucket.org/pypy/pypy/changeset/31b9c2717719/ Log: first pass at differentiating types from instances and handling __array_finalize__ diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -11,12 +11,17 @@ isinstance(w_obj, W_NDimArray)) def wrap_impl(space, cls, impl): - if space.is_w(space.type(cls), space.gettypefor(W_NDimArray)): + if cls is None or space.is_w(space.type(cls), space.gettypefor(W_NDimArray)): ret = W_NDimArray(impl) else: - ret = space.allocate_instance(W_NDimArray, space.type(cls)) - print 'created',space.type(ret) + if space.isinstance_w(cls, space.w_type): + #got type, either from __new__ or from view casting + ret = space.allocate_instance(W_NDimArray, cls) + else: + ret = space.allocate_instance(W_NDimArray, space.type(cls)) W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, space.wrap('__array_finalize__')), + cls) return ret class ArrayArgumentException(Exception): @@ -31,7 +36,7 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', subtype=None): + def from_shape(space, shape, dtype, order='C', subtype=None, is_new=False): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -42,12 +47,25 @@ backstrides) if subtype: if space.isinstance_w(subtype, space.w_type): - #got type, probably from descr_XXX + #got type, either from __new__ or from view casting ret = space.allocate_instance(W_NDimArray, subtype) + W_NDimArray.__init__(ret, impl) + if is_new: + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), + space.w_None) + else: + # view casting, call finalize + 
space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), + subtype) else: #got instance ret = space.allocate_instance(W_NDimArray, space.type(subtype)) - W_NDimArray.__init__(ret, impl) + W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), + subtype) else: ret = W_NDimArray(impl) return ret @@ -68,10 +86,17 @@ if space.isinstance_w(subtype, space.w_type): #got type, probably from descr_XXX ret = space.allocate_instance(W_NDimArray, subtype) + W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), + space.w_None) else: #got instance ret = space.allocate_instance(W_NDimArray, space.type(subtype)) - W_NDimArray.__init__(ret, impl) + W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), + subtype) return ret return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -664,12 +664,7 @@ "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize v = impl.get_view(self, dtype, new_shape) - if w_type is not None: - ret = space.allocate_instance(W_NDimArray, w_type) - W_NDimArray.__init__(ret, v) - return ret - return W_NDimArray(v) - + return wrap_impl(space, w_type, v) # --------------------- operations ---------------------------- @@ -906,6 +901,8 @@ rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + def descr___array_finalize__(self, space, w_obj): + pass @unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, @@ -921,7 +918,8 @@ return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) - return W_NDimArray.from_shape(space, shape, dtype, order, w_subtype) + ret = W_NDimArray.from_shape(space, shape, dtype, order, w_subtype, is_new=True) + return ret @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): @@ -1068,6 +1066,7 @@ W_NDimArray.fdel___pypy_data__), __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), + __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -4,8 +4,9 @@ class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + ''' from numpypy import ndarray - BaseNumpyAppTest.setup_class.im_func(cls) class NoNew(ndarray): def __new__(cls): raise ValueError('should not call __new__') @@ -17,8 +18,42 @@ return cls def __array_finalize(self, obj): self.called_finalize = True - cls.w_NoNew = cls.space.wrap(NoNew) - cls.w_SubType = cls.space.wrap(SubType) + cls.w_NoNew = cls.space.wrap(NoNew) + cls.w_SubType = cls.space.wrap(SubType) + ''' + + def test_finalize(self): + #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray + import numpypy as np + class InfoArray(np.ndarray): + def __new__(subtype, shape, 
dtype=float, buffer=None, offset=0, + strides=None, order='C', info=None): + obj = np.ndarray.__new__(subtype, shape, dtype, buffer, + offset, strides, order) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + print 'finazlize with None' + return + print 'finalize with something' + self.info = getattr(obj, 'info', None) + obj = InfoArray(shape=(3,)) + assert isinstance(obj, InfoArray) + assert obj.info is None + obj = InfoArray(shape=(3,), info='information') + assert obj.info == 'information' + v = obj[1:] + assert isinstance(v, InfoArray) + assert v.base is obj + assert v.info == 'information' + arr = np.arange(10) + print '1' + cast_arr = arr.view(InfoArray) + assert isinstance(cast_arr, InfoArray) + assert cast_arr.base is arr + assert cast_arr.info is None def test_sub_where(self): from numpypy import where, ones, zeros, array From noreply at buildbot.pypy.org Fri Jul 5 09:46:23 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 09:46:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix: don't trace stubs Message-ID: <20130705074623.46C871C01C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r349:de365b519570 Date: 2013-07-05 09:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/de365b519570/ Log: fix: don't trace stubs diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -225,7 +225,8 @@ id_copy->h_tid |= GCFLAG_VISITED; /* XXX: may not always need tracing? */ - gcptrlist_insert(&objects_to_trace, id_copy); + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); } else { /* prebuilt originals won't get collected anyway diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -5,7 +5,7 @@ gcc -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall -lrt duhton_debug: *.c *.h ../c4/*.c ../c4/*.h - gcc -pthread -g -DDu_DEBUG -D_GC_DEBUG=2 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt + gcc -pthread -g -DDu_DEBUG -D_GC_DEBUGPRINTS=1 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt clean: rm -f duhton duhton_debug From noreply at buildbot.pypy.org Fri Jul 5 09:46:24 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 09:46:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge Message-ID: <20130705074624.8988D1C01C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r350:93309df73f62 Date: 2013-07-05 09:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/93309df73f62/ Log: merge diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -1,20 +1,40 @@ #include "duhton.h" +#define DEFAULT_NUM_THREADS 4 int main(int argc, char **argv) { - char *filename; - int interactive; - if (argc <= 1) { + char *filename = NULL; + int interactive = 1; + int i; + int num_threads = DEFAULT_NUM_THREADS; + + for (i = 1; i < argc; ++i) { + if (strcmp(argv[i], "--help") == 0) { + printf("Duhton: a simple lisp-like language with STM support\n\n"); + printf("Usage: duhton [--help] [--num-threads no] [filename]\n"); + printf(" --help: this help\n"); + printf(" --num-threads : number of threads (default 4)\n\n"); + exit(0); + } else if (strcmp(argv[i], "--num-threads") == 0) { + if (i == argc - 1) { + printf("ERROR: --num-threads requires a parameter\n"); + exit(1); + } + num_threads = atoi(argv[i + 1]); + i++; + } else if (strncmp(argv[i], "--", 2) == 0) { + printf("ERROR: unrecognized parameter %s\n", argv[i]); + } else { + filename = argv[i]; 
+ interactive = 0; + } + } + if (!filename) { filename = "-"; /* stdin */ - interactive = 1; - } - else { - filename = argv[1]; - interactive = 0; - } + } - Du_Initialize(); + Du_Initialize(num_threads); while (1) { if (interactive) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -138,7 +138,7 @@ DuObject *rest, int execute_now); DuObject *_Du_GetGlobals(void); -void Du_Initialize(void); +void Du_Initialize(int); void Du_Finalize(void); #define Du_Globals (_Du_GetGlobals()) @@ -182,5 +182,7 @@ } #endif +extern pthread_t *all_threads; +extern int all_threads_count; #endif /* _DUHTON_H_ */ diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -1,6 +1,8 @@ #include "duhton.h" #include +pthread_t *all_threads; +int all_threads_count; static void _du_getargs1(const char *name, DuObject *cons, DuObject *locals, DuObject **a) @@ -561,9 +563,11 @@ return Du_None; } -void Du_Initialize(void) +void Du_Initialize(int num_threads) { stm_initialize(); + all_threads_count = num_threads; + all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -2,10 +2,6 @@ #include #include -#ifndef NUM_THREADS -#define NUM_THREADS 4 -#endif - static DuConsObject du_pending_transactions = { DuOBJECT_HEAD_INIT(DUTYPE_CONS), @@ -21,15 +17,13 @@ static void run_all_threads(void) { int i; - pthread_t th[NUM_THREADS]; - - for (i = 0; i < NUM_THREADS; i++) { - int status = pthread_create(&th[i], NULL, run_thread, NULL); + for (i = 0; i < all_threads_count; i++) { + int status = pthread_create(&all_threads[i], NULL, run_thread, NULL); if (status != 0) stm_fatalerror("status != 0\n"); } - for (i = 0; i < NUM_THREADS; i++) { - pthread_join(th[i], NULL); + for (i = 0; i < all_threads_count; i++) { + pthread_join(all_threads[i], NULL); } } @@ -88,13 +82,13 @@ else { /* nothing to do, wait */ thread_sleeping++; - if (thread_sleeping == NUM_THREADS) { + if (thread_sleeping == all_threads_count) { pthread_mutex_unlock(&mutex_sleep); } stm_commit_transaction(); pthread_mutex_lock(&mutex_sleep); stm_begin_inevitable_transaction(); - if (thread_sleeping == NUM_THREADS) { + if (thread_sleeping == all_threads_count) { pthread_mutex_unlock(&mutex_sleep); return NULL; } From noreply at buildbot.pypy.org Fri Jul 5 10:46:59 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 10:46:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix test Message-ID: <20130705084659.57C9C1C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65188:3f384e7615a0 Date: 2013-07-04 11:10 +0200 http://bitbucket.org/pypy/pypy/changeset/3f384e7615a0/ Log: fix test diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -160,7 +160,7 @@ extfunc() res = self.interpret_inevitable(f1, []) - assert res == 'direct_call' + assert res == 'extfunc()' def test_rpy_direct_call(self): def f2(): From noreply at buildbot.pypy.org Fri Jul 5 10:47:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 10:47:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: getting tests to run with questionable methods Message-ID: 
<20130705084700.B519A1C0E1C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65189:4820d8ea75ff Date: 2013-07-05 08:20 +0200 http://bitbucket.org/pypy/pypy/changeset/4820d8ea75ff/ Log: getting tests to run with questionable methods diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -493,11 +493,11 @@ def _setup_barriers_for_stm(self): from rpython.memory.gc import stmgc WBDescr = WriteBarrierDescr - self.P2Rdescr = WBDescr(self, (stmgc.GCFLAG_GLOBAL, 'P2R', + self.P2Rdescr = WBDescr(self, (0, 'P2R', 'stm_DirectReadBarrier')) - self.P2Wdescr = WBDescr(self, (stmgc.GCFLAG_NOT_WRITTEN, 'P2W', + self.P2Wdescr = WBDescr(self, (0, 'P2W', 'stm_WriteBarrier')) - self.R2Wdescr = WBDescr(self, (stmgc.GCFLAG_NOT_WRITTEN, 'R2W', + self.R2Wdescr = WBDescr(self, (0, 'R2W', 'stm_WriteBarrierFromReady')) self.write_barrier_descr = "wbdescr: do not use" # diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -35,6 +35,7 @@ tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + tydescr = get_field_descr(self.gc_ll_descr, T, 'y') # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1,8 +1,9 @@ from rpython.jit.backend.llsupport.descr import * from rpython.jit.backend.llsupport.gc import * from rpython.jit.metainterp.gc import get_description -from rpython.jit.backend.llsupport.test.test_rewrite import RewriteTests - +from rpython.jit.backend.llsupport.test.test_rewrite import ( + RewriteTests, BaseFakeCPU) +from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory class TestStm(RewriteTests): def setup_method(self, meth): @@ -17,11 +18,12 @@ self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) # - class FakeCPU(object): + class FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr + self.cpu = FakeCPU() def check_rewrite(self, frm_operations, to_operations, **namespace): @@ -48,6 +50,8 @@ """) def test_rewrite_setfield_gc_const(self): + TP = lltype.GcArray(lltype.Signed) + NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) self.check_rewrite(""" [p1, p2] setfield_gc(ConstPtr(t), p2, descr=tzdescr) @@ -58,7 +62,7 @@ cond_call_gc_wb(p3, 0, descr=P2Wdescr) setfield_gc(p3, p2, descr=tzdescr) jump() - """) + """, t=NULL) def test_rewrite_setfield_gc_on_local(self): self.check_rewrite(""" @@ -164,6 +168,8 @@ """) def test_rewrite_getfield_gc_const(self): + TP = lltype.GcArray(lltype.Signed) + NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) self.check_rewrite(""" [p1] p2 = getfield_gc(ConstPtr(t), descr=tzdescr) @@ -174,7 +180,7 @@ cond_call_gc_wb(p3, 0, descr=P2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) jump(p2) - """) + """, t=NULL) # XXX could do better: G2Rdescr def test_rewrite_getarrayitem_gc(self): @@ -300,62 +306,62 @@ def test_getfield_raw(self): self.check_rewrite(""" [i1, i2] - i3 = getfield_raw(i1, 
descr=?) + i3 = getfield_raw(i1, descr=h_original; + /* prebuilt original objects may have a predifined + hash in h_original */ + if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; + /* see fix_outdated() */ + id_copy->h_tid |= GCFLAG_VISITED; + + /* XXX: may not always need tracing? */ + gcptrlist_insert(&objects_to_trace, id_copy); + } + else { + /* prebuilt originals won't get collected anyway + and if they are not reachable in any other way, + we only ever need their location, not their content */ + } + } +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -227,6 +250,8 @@ obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; gcptrlist_insert(&objects_to_trace, obj); + + keep_original_alive(obj); } } else if (obj->h_tid & GCFLAG_PUBLIC) { @@ -247,6 +272,8 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; + keep_original_alive(prev_obj); + assert(*pobj == prev_obj); gcptr obj1 = obj; visit(&obj1); /* recursion, but should be only once */ @@ -257,6 +284,9 @@ } if (!(obj->h_revision & 3)) { + /* obj is neither a stub nor a most recent revision: + completely ignore obj->h_revision */ + obj = (gcptr)obj->h_revision; assert(obj->h_tid & GCFLAG_PUBLIC); prev_obj->h_revision = (revision_t)obj; @@ -275,7 +305,14 @@ assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - + + if (obj->h_original && (gcptr)obj->h_original != B) { + /* if B is original, it will be visited anyway */ + assert(obj->h_original == B->h_original); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + keep_original_alive(obj); + } + obj->h_tid |= GCFLAG_VISITED; B->h_tid |= GCFLAG_VISITED; assert(!(obj->h_tid & GCFLAG_STUB)); @@ -294,6 +331,7 @@ } } + static void visit_keep(gcptr obj) { if (!(obj->h_tid & GCFLAG_VISITED)) { @@ -305,6 +343,7 @@ assert(!(obj->h_revision & 2)); visit((gcptr *)&obj->h_revision); } + keep_original_alive(obj); } } @@ -376,8 +415,24 @@ outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; + + /* Objects that were not visited yet must have the PUB_TO_PRIV + flag. Except if that transaction will abort anyway, then it + may be removed from a previous major collection that didn't + fix the PUB_TO_PRIV because the transaction was going to + abort anyway: + 1. minor_collect before major collect (R->L, R is outdated, abort) + 2. major collect removes flag + 3. major collect again, same thread, no time to abort + 4. flag still removed + */ + assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, + R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); visit_keep(R); if (L != NULL) { + /* minor collection found R->L in public_to_young + and R was modified. It then sets item->val to NULL and wants + to abort later. */ revision_t v = L->h_revision; visit_keep(L); /* a bit of custom logic here: if L->h_revision used to @@ -385,8 +440,10 @@ keep this property, even though visit_keep(L) might decide it would be better to make it point to a more recent copy. */ - if (v == (revision_t)R) + if (v == (revision_t)R) { + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); L->h_revision = v; /* restore */ + } } } G2L_LOOP_END; @@ -449,6 +506,7 @@ just removing it is very wrong --- we want 'd' to abort. 
*/ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; } @@ -483,14 +541,16 @@ /* We are now after visiting all objects, and we know the * transaction isn't aborting because of this collection. We have * cleared GCFLAG_PUBLIC_TO_PRIVATE from public objects at the end - * of the chain. Now we have to set it again on public objects that - * have a private copy. + * of the chain (head revisions). Now we have to set it again on + * public objects that have a private copy. */ wlog_t *item; dprintf(("fix public_to_private on thread %p\n", d)); G2L_LOOP_FORWARD(d->public_to_private, item) { + assert(item->addr->h_tid & GCFLAG_VISITED); + assert(item->val->h_tid & GCFLAG_VISITED); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -45,7 +45,12 @@ void stmgc_done_nursery(void) { struct tx_descriptor *d = thread_descriptor; - assert(!minor_collect_anything_to_do(d)); + /* someone may have called minor_collect_soon() + inbetween the preceeding minor_collect() and + this assert (committransaction() -> + updatechainheads() -> stub_malloc() -> ...): */ + assert(!minor_collect_anything_to_do(d) + || d->nursery_current == d->nursery_end); stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); @@ -262,7 +267,7 @@ return fresh_old_copy; } -inline void copy_to_old_id_copy(gcptr obj, gcptr id) +void copy_to_old_id_copy(gcptr obj, gcptr id) { assert(!is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); @@ -315,6 +320,7 @@ *root = fresh_old_copy; /* add 'fresh_old_copy' to the list of objects to trace */ + assert(!(fresh_old_copy->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, fresh_old_copy); } } @@ -426,6 +432,7 @@ gcptr P = items[i]; assert(P->h_tid & GCFLAG_PUBLIC); assert(P->h_tid & GCFLAG_OLD); + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; @@ -474,7 +481,18 @@ assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - obj->h_tid |= GCFLAG_WRITE_BARRIER; + + /* We add the WRITE_BARRIER flag to objects here, but warning: + we may occasionally see a PUBLIC object --- one that was + a private/protected object when it was added to + old_objects_to_trace, and has been stolen. So we have to + check and not do any change the obj->h_tid in that case. + Otherwise this conflicts with the rule that we may only + modify obj->h_tid of a public object in order to add + PUBLIC_TO_PRIVATE. 
+ */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) + obj->h_tid |= GCFLAG_WRITE_BARRIER; stmgc_trace(obj, &visit_if_young); } @@ -672,6 +690,7 @@ gcptr P = stmgcpage_malloc(allocate_size); memset(P, 0, allocate_size); P->h_tid = tid | GCFLAG_OLD; + assert(!(P->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, P); return P; } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -637f6c9d19f7 +38fcfb8212e2 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -2,7 +2,7 @@ #include "stmimpl.h" -inline void copy_to_old_id_copy(gcptr obj, gcptr id); +void copy_to_old_id_copy(gcptr obj, gcptr id); gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { @@ -254,6 +254,7 @@ for (i = 0; i < size; i += 2) { gcptr B = items[i]; assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ + assert(B->h_tid & GCFLAG_PUBLIC); /* to be on the safe side --- but actually needed, see the gcptrlist_insert2(L, NULL) above */ @@ -265,6 +266,7 @@ assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(L->h_revision)); + assert(B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); g2l_insert(&d->public_to_private, B, L); /* this is definitely needed: all keys in public_to_private diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -81,6 +81,7 @@ int stm_enter_callback_call(void) { int token = (thread_descriptor == NULL); + dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); DescriptorInit(); @@ -94,6 +95,7 @@ void stm_leave_callback_call(int token) { + dprintf(("leave_callback_call(%d)\n", token)); if (token == 1) stmgc_minor_collect(); /* force everything out of the nursery */ diff --git a/rpython/translator/stm/test/test_stmgcintf.c b/rpython/translator/stm/test/test_stmgcintf.c --- a/rpython/translator/stm/test/test_stmgcintf.c +++ b/rpython/translator/stm/test/test_stmgcintf.c @@ -11,6 +11,7 @@ struct pypy_header0 { long h_tid; Unsigned h_revision; + Unsigned h_original; }; struct pypy_pypy_rlib_rstm_Transaction0 { @@ -33,7 +34,8 @@ #define _RPyString_AsString(x) x #define RPyString_Size(x) strlen(x) - +#include "src_stm/stmgc.h" +#include "src_stm/stmimpl.h" #include "src_stm/et.h" #include "src_stm/et.c" From noreply at buildbot.pypy.org Fri Jul 5 10:47:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 10:47:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix some tests Message-ID: <20130705084702.2D3F71C0E44@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65190:b3adf69b07ae Date: 2013-07-05 10:46 +0200 http://bitbucket.org/pypy/pypy/changeset/b3adf69b07ae/ Log: fix some tests diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -5,7 +5,7 @@ from pypy.module.thread.threadlocals import BaseThreadLocals from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext -from pypy.interpreter.gateway import Wrappable, W_Root, interp2app +from pypy.interpreter.gateway import W_Root, interp2app from pypy.interpreter.typedef import TypeDef, 
GetSetProperty, descr_get_dict from rpython.rlib import rthread from rpython.rlib import rstm @@ -122,7 +122,7 @@ # ____________________________________________________________ -class STMLocal(Wrappable): +class STMLocal(W_Root): """Thread-local data""" @jit.dont_look_inside diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -28,13 +28,13 @@ def abort_info_pop(count): if we_are_translated(): - stmgcintf.StmOperations.abort_info_pop(count) + pass #stmgcintf.StmOperations.abort_info_pop(count) def charp_inspect_abort_info(): - return stmgcintf.StmOperations.inspect_abort_info() + pass # return stmgcintf.StmOperations.inspect_abort_info() def abort_and_retry(): - stmgcintf.StmOperations.abort_and_retry() + pass # stmgcintf.StmOperations.abort_and_retry() def before_external_call(): llop.stm_commit_transaction(lltype.Void) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -584,28 +584,30 @@ from rpython.translator.stm.funcgen import op_stm self.__class__.op_stm = op_stm return self.op_stm(op) - OP_STM_INITIALIZE = _OP_STM - OP_STM_FINALIZE = _OP_STM - OP_STM_BECOME_INEVITABLE = _OP_STM - OP_STM_BARRIER = _OP_STM - OP_STM_PTR_EQ = _OP_STM - OP_STM_PUSH_ROOT = _OP_STM - OP_STM_POP_ROOT_INTO = _OP_STM - OP_STM_ALLOCATE = _OP_STM - OP_STM_GET_TID = _OP_STM - OP_STM_HASH = _OP_STM - OP_STM_ID = _OP_STM - OP_STM_COMMIT_TRANSACTION = _OP_STM + OP_STM_INITIALIZE = _OP_STM + OP_STM_FINALIZE = _OP_STM + OP_STM_BECOME_INEVITABLE = _OP_STM + OP_STM_BARRIER = _OP_STM + OP_STM_PTR_EQ = _OP_STM + OP_STM_PUSH_ROOT = _OP_STM + OP_STM_POP_ROOT_INTO = _OP_STM + OP_STM_ALLOCATE = _OP_STM + OP_STM_GET_TID = _OP_STM + OP_STM_HASH = _OP_STM + OP_STM_ID = _OP_STM + OP_STM_COMMIT_TRANSACTION = _OP_STM OP_STM_BEGIN_INEVITABLE_TRANSACTION = _OP_STM - OP_STM_SHOULD_BREAK_TRANSACTION = _OP_STM - OP_STM_SET_TRANSACTION_LENGTH = _OP_STM - OP_STM_CHANGE_ATOMIC = _OP_STM - OP_STM_GET_ATOMIC = _OP_STM - OP_STM_THREADLOCAL_GET = _OP_STM - OP_STM_THREADLOCAL_SET = _OP_STM - OP_STM_PERFORM_TRANSACTION = _OP_STM - OP_STM_ENTER_CALLBACK_CALL = _OP_STM - OP_STM_LEAVE_CALLBACK_CALL = _OP_STM + OP_STM_SHOULD_BREAK_TRANSACTION = _OP_STM + OP_STM_SET_TRANSACTION_LENGTH = _OP_STM + OP_STM_CHANGE_ATOMIC = _OP_STM + OP_STM_GET_ATOMIC = _OP_STM + OP_STM_THREADLOCAL_GET = _OP_STM + OP_STM_THREADLOCAL_SET = _OP_STM + OP_STM_PERFORM_TRANSACTION = _OP_STM + OP_STM_ENTER_CALLBACK_CALL = _OP_STM + OP_STM_LEAVE_CALLBACK_CALL = _OP_STM + OP_STM_MAJOR_COLLECT = _OP_STM + OP_STM_MINOR_COLLECT = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -159,6 +159,13 @@ arg0 = funcgen.expr(op.args[0]) return 'stm_leave_callback_call(%s);' % (arg0,) +def stm_minor_collect(funcgen, op): + return 'stmgc_minor_collect();' + +def stm_major_collect(funcgen, op): + return 'stmgcpage_possibly_major_collect(1);}' # forced + + def op_stm(funcgen, op): func = globals()[op.opname] diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -226,7 +226,8 @@ id_copy->h_tid |= GCFLAG_VISITED; /* XXX: may not always need tracing? 
*/ - gcptrlist_insert(&objects_to_trace, id_copy); + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); } else { /* prebuilt originals won't get collected anyway diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -38fcfb8212e2 +7a86a8b3cbb1+ diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -120,18 +120,21 @@ def test_bug1(self): # - class Foobar: - pass def check(foobar, retry_counter): rgc.collect(0) return 0 # + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) class X: def __init__(self, count): self.count = count def g(): x = X(1000) - rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(S)) + #rstm.perform_transaction(check, Foobar, Foobar()) return x def entry_point(argv): x = X(len(argv)) @@ -145,8 +148,6 @@ def test_bug2(self): # - class Foobar: - pass def check(foobar, retry_counter): return 0 # do nothing # @@ -154,11 +155,16 @@ pass prebuilt2 = [X2(), X2()] # + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) def bug2(count): x = prebuilt2[count] x.foobar = 2 # 'x' becomes a local # - rstm.perform_transaction(check, Foobar, Foobar()) + #rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(S)) # 'x' becomes the global again # y = prebuilt2[count] # same prebuilt obj @@ -175,10 +181,14 @@ assert '12\n12\n' in data, "got: %r" % (data,) def test_prebuilt_nongc(self): - class Foobar: - pass def check(foobar, retry_counter): return 0 # do nothing + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + from rpython.rtyper.lltypesystem import lltype + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) + from rpython.rtyper.lltypesystem import lltype R = lltype.GcStruct('R', ('x', lltype.Signed)) S1 = lltype.Struct('S1', ('r', lltype.Ptr(R))) @@ -187,7 +197,8 @@ # hints={'stm_thread_local': True}) #s2 = lltype.malloc(S2, immortal=True, flavor='raw') def do_stuff(): - rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(S)) + #rstm.perform_transaction(check, Foobar, Foobar()) print s1.r.x #print s2.r.x do_stuff._dont_inline_ = True From noreply at buildbot.pypy.org Fri Jul 5 11:05:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 11:05:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: typo and change to real descrs Message-ID: <20130705090500.148E61C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65191:ceb9b8b28bcf Date: 2013-07-05 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/ceb9b8b28bcf/ Log: typo and change to real descrs diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -306,62 +306,62 
@@ def test_getfield_raw(self): self.check_rewrite(""" [i1, i2] - i3 = getfield_raw(i1, descr= Author: Matti Picus Branch: ndarray-subtype Changeset: r65192:7398ff65e206 Date: 2013-07-05 12:30 +0300 http://bitbucket.org/pypy/pypy/changeset/7398ff65e206/ Log: use appexec (amaury) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -5,22 +5,23 @@ class AppTestSupport(BaseNumpyAppTest): def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) - ''' - from numpypy import ndarray - class NoNew(ndarray): - def __new__(cls): - raise ValueError('should not call __new__') - def __array_finalize(self, obj): - self.called_finalize = True - class SubType(ndarray): - def __new__(cls): - cls.called_new = True - return cls - def __array_finalize(self, obj): - self.called_finalize = True - cls.w_NoNew = cls.space.wrap(NoNew) - cls.w_SubType = cls.space.wrap(SubType) - ''' + cls.w_NoNew = cls.space.appexec([], '''(): + from numpypy import ndarray + class NoNew(ndarray): + def __new__(cls): + raise ValueError('should not call __new__') + def __array_finalize(self, obj): + self.called_finalize = True + return NoNew ''') + cls.w_SubType = cls.space.appexec([], '''(): + from numpypy import ndarray + class SubType(ndarray): + def __new__(cls): + cls.called_new = True + return cls + def __array_finalize(self, obj): + self.called_finalize = True + return SubType ''') def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray From noreply at buildbot.pypy.org Fri Jul 5 13:12:47 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 5 Jul 2013 13:12:47 +0200 (CEST) Subject: [pypy-commit] pypy default: try to document appexec Message-ID: <20130705111247.E9B371C3237@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65193:4eb52818e7c0 Date: 2013-07-05 14:11 +0300 http://bitbucket.org/pypy/pypy/changeset/4eb52818e7c0/ Log: try to document appexec diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. 
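appexec also accepts more than one wrapped argument; the parameter list at the start of the code string must then name one parameter per value passed in. A sketch in the same style as the example above (the test name and values here are invented, not taken from the tree)::

    class AppTestDivmod(object):
        def setup_class(cls):
            space = cls.space
            cls.w_pair = space.appexec([space.wrap(17), space.wrap(5)], """(n, d):
                return divmod(n, d)
            """)

        def test_divmod(self):
            assert self.pair == (3, 2)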
+Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- From noreply at buildbot.pypy.org Fri Jul 5 13:50:15 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 13:50:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: missing import Message-ID: <20130705115015.6335A1C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65194:ccb4307abb9c Date: 2013-07-05 12:15 +0200 http://bitbucket.org/pypy/pypy/changeset/ccb4307abb9c/ Log: missing import diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry @@ -31,7 +31,7 @@ pass #stmgcintf.StmOperations.abort_info_pop(count) def charp_inspect_abort_info(): - pass # return stmgcintf.StmOperations.inspect_abort_info() + return rffi.NULL # return stmgcintf.StmOperations.inspect_abort_info() def abort_and_retry(): pass # stmgcintf.StmOperations.abort_and_retry() From noreply at buildbot.pypy.org Fri Jul 5 13:50:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 13:50:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: rename of transform2.py to transform.py Message-ID: <20130705115016.C8C6D1C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65195:88d375dc39b3 Date: 2013-07-05 13:08 +0200 http://bitbucket.org/pypy/pypy/changeset/88d375dc39b3/ Log: rename of transform2.py to transform.py diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -141,11 +141,11 @@ translator = self.translator if self.config.translation.stm: - from rpython.translator.stm import transform2 + from rpython.translator.stm import transform self.getentrypointptr() # build the wrapper first # ^^ this is needed to make sure we see the no-GC wrapper function # calling the GC entrypoint function. 
- stmtransformer = transform2.STMTransformer(self.translator) + stmtransformer = transform.STMTransformer(self.translator) stmtransformer.transform() gcpolicyclass = self.get_gcpolicyclass() diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py --- a/rpython/translator/stm/test/test_jitdriver.py +++ b/rpython/translator/stm/test/test_jitdriver.py @@ -1,5 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.stm.test.transform2_support import BaseTestTransform +from rpython.translator.stm.test.transform_support import BaseTestTransform from rpython.rlib.jit import JitDriver diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,5 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.stm.test.transform2_support import BaseTestTransform +from rpython.translator.stm.test.transform_support import BaseTestTransform class TestTransform(BaseTestTransform): diff --git a/rpython/translator/stm/test/transform2_support.py b/rpython/translator/stm/test/transform_support.py rename from rpython/translator/stm/test/transform2_support.py rename to rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform2_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import lltype, opimpl from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache -from rpython.translator.stm.transform2 import STMTransformer +from rpython.translator.stm.transform import STMTransformer from rpython.translator.stm.writebarrier import MORE_PRECISE_CATEGORIES from rpython.conftest import option diff --git a/rpython/translator/stm/transform2.py b/rpython/translator/stm/transform.py rename from rpython/translator/stm/transform2.py rename to rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform2.py +++ b/rpython/translator/stm/transform.py @@ -1,3 +1,10 @@ +from rpython.translator.backendopt.writeanalyze import WriteAnalyzer +from rpython.translator.stm.writebarrier import insert_stm_barrier +from rpython.translator.stm.inevitable import insert_turn_inevitable +from rpython.translator.stm.jitdriver import reorganize_around_jit_driver +from rpython.translator.stm.threadlocalref import transform_tlref +from rpython.translator.c.support import log + class STMTransformer(object): @@ -18,38 +25,27 @@ self.print_logs_after_gc() def transform_write_barrier(self): - from rpython.translator.backendopt.writeanalyze import WriteAnalyzer - from rpython.translator.stm.writebarrier import insert_stm_barrier - # self.write_analyzer = WriteAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) del self.write_analyzer def transform_turn_inevitable(self): - from rpython.translator.stm.inevitable import insert_turn_inevitable - # for graph in self.translator.graphs: insert_turn_inevitable(graph) def transform_jit_driver(self): - from rpython.translator.stm.jitdriver import reorganize_around_jit_driver - # for graph in self.translator.graphs: reorganize_around_jit_driver(self, graph) def transform_threadlocalref(self): - from rpython.translator.stm.threadlocalref import transform_tlref transform_tlref(self.translator) def start_log(self): - from 
rpython.translator.c.support import log log.info("Software Transactional Memory transformation") def print_logs(self): - from rpython.translator.c.support import log log.info("Software Transactional Memory transformation applied") def print_logs_after_gc(self): - from rpython.translator.c.support import log log.info("Software Transactional Memory transformation-after-gc done") From noreply at buildbot.pypy.org Fri Jul 5 13:50:18 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 13:50:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add new resops COND_CALL_STM_WB and COND_CALL_STM_RB Message-ID: <20130705115018.18D311C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65196:675a3d0878f1 Date: 2013-07-05 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/675a3d0878f1/ Log: add new resops COND_CALL_STM_WB and COND_CALL_STM_RB diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -121,8 +121,13 @@ except KeyError: return v_base # no barrier needed args = [v_base, self.c_zero] - self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + if target_category == 'W': + op = rop.COND_CALL_STM_WB + else: + op = rop.COND_CALL_STM_RB + self.newops.append(ResOperation(op, args, None, descr=write_barrier_descr)) + self.known_category[v_base] = target_category return v_base diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -44,7 +44,7 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -59,7 +59,7 @@ """, """ [p1, p2] p3 = same_as(ConstPtr(t)) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) setfield_gc(p3, p2, descr=tzdescr) jump() """, t=NULL) @@ -87,9 +87,9 @@ jump() """, """ [p1, p2, p3, p4] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) setfield_gc(p3, p4, descr=tzdescr) jump() """) @@ -102,7 +102,7 @@ jump() """, """ [p1, p2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) jump() @@ -117,10 +117,10 @@ jump(p1) """, """ [p1, p2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, i3, descr=tydescr) jump(p1) """) @@ -162,7 +162,7 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) """) @@ -177,7 +177,7 @@ """, """ [p1] p3 = same_as(ConstPtr(t)) - cond_call_gc_wb(p3, 0, descr=P2Rdescr) + cond_call_stm_rb(p3, 0, descr=P2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) jump(p2) """, t=NULL) @@ -190,7 +190,7 @@ jump(i3) """, """ [p1, i2] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) jump(i3) """) @@ -202,7 +202,7 @@ jump(i3) """, """ [p1, i2] - 
cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=adescr) jump(i3) """) @@ -215,7 +215,7 @@ jump(p2, i2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) i2 = getfield_gc(p1, descr=tydescr) jump(p2, i2) @@ -229,9 +229,9 @@ jump(p2, i2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - cond_call_gc_wb(p2, 0, descr=P2Rdescr) + cond_call_stm_rb(p2, 0, descr=P2Rdescr) i2 = getfield_gc(p2, descr=tydescr) jump(p2, i2) """) @@ -247,10 +247,10 @@ jump(p1) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i1 = getfield_gc(p1, descr=tydescr) i2 = int_add(i1, 1) - cond_call_gc_wb(p1, 0, descr=R2Wdescr) + cond_call_stm_wb(p1, 0, descr=R2Wdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) @@ -263,7 +263,7 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 123, descr=tydescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) @@ -295,10 +295,10 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) call(p2) - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 5, descr=tydescr) jump(p2) """) @@ -358,9 +358,9 @@ jump() """, """ [p1, i1, p2, p3, i3, p4] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setarrayitem_gc(p1, i1, p2, descr=adescr) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) jump() """) @@ -374,7 +374,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) @@ -390,7 +390,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setinteriorfield_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=adescr) @@ -405,7 +405,7 @@ jump() """, """ [p1, i2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() @@ -432,11 +432,11 @@ jump(i2, p7) """ % op, """ [i1, i2, i3, p7] - cond_call_gc_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, 0, descr=P2Wdescr) setfield_gc(p7, 10, descr=tydescr) $INEV %s - cond_call_gc_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, 0, descr=P2Wdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -448,8 +448,8 @@ jump() """, """ [p1, p2, i1, i2, i3] - cond_call_gc_wb(p2, 0, descr=P2Wdescr) - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_wb(p2, 0, descr=P2Wdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) copystrcontent(p1, p2, i1, i2, i3) jump() """) @@ -468,7 +468,7 @@ jump(p1) """ % op, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) @@ -491,10 +491,10 @@ jump(p1) """ % op, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + 
cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 20, descr=tydescr) jump(p1) """ % op, calldescr2=calldescr2) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -345,6 +345,8 @@ if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, rop.COND_CALL_GC_WB, + rop.COND_CALL_STM_WB, + rop.COND_CALL_STM_RB, rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -500,6 +500,8 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', #'RUNTIMENEW/1', # ootype operation + 'COND_CALL_STM_WB/2d', # [objptr, newvalue] (write barrier) + 'COND_CALL_STM_RB/2d', # [objptr, newvalue] (read barrier) 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) 'DEBUG_MERGE_POINT/*', # debugging only From noreply at buildbot.pypy.org Fri Jul 5 13:50:19 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 13:50:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: I guess those tests need to be rewritten for STM_WB/RB. Still, remove syntax errors. Message-ID: <20130705115019.52F941C05DF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65197:dfde347f3c17 Date: 2013-07-05 13:49 +0200 http://bitbucket.org/pypy/pypy/changeset/dfde347f3c17/ Log: I guess those tests need to be rewritten for STM_WB/RB. Still, remove syntax errors. diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -213,7 +213,7 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) + rewriter = GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4242,3 +4242,14 @@ assert rffi.cast(lltype.Signed, a[0]) == -7654 assert rffi.cast(lltype.Signed, a[1]) == 777 lltype.free(a, flavor='raw') + +class WBDescrForTests(AbstractDescr): + returns_modified_object = False + wb_slowpath = (0, 0, 0, 0) + def get_wb_slowpath(self, c1, c2): + return self.wb_slowpath[c1+2*c2] + def set_wb_slowpath(self, c1, c2, addr): + i = c1+2*c2 + self.wb_slowpath = (self.wb_slowpath[:i] + (addr,) + + self.wb_slowpath[i+1:]) + From noreply at buildbot.pypy.org Fri Jul 5 14:37:27 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 5 Jul 2013 14:37:27 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: cleanup, seperate class instances from class types Message-ID: <20130705123727.5B5601C0E44@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65198:b82565576443 Date: 2013-07-05 15:36 +0300 http://bitbucket.org/pypy/pypy/changeset/b82565576443/ Log: cleanup, seperate class instances from class types diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,19 +10,14 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) -def wrap_impl(space, cls, impl): - 
if cls is None or space.is_w(space.type(cls), space.gettypefor(W_NDimArray)): - ret = W_NDimArray(impl) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) else: - if space.isinstance_w(cls, space.w_type): - #got type, either from __new__ or from view casting - ret = space.allocate_instance(W_NDimArray, cls) - else: - ret = space.allocate_instance(W_NDimArray, space.type(cls)) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, space.wrap('__array_finalize__')), - cls) - return ret + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, space.wrap('__array_finalize__'), w_instance) + return w_ret class ArrayArgumentException(Exception): pass @@ -106,7 +101,7 @@ impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return wrap_impl(space, orig_arr, impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -260,14 +260,16 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return wrap_impl(space, self, self.implementation.copy(space)) + return wrap_impl(space, space.type(self), + self, self.implementation.copy(space)) def descr_get_real(self, space): - return wrap_impl(space, self, self.implementation.get_real(self)) + return wrap_impl(space, space.type(self), self, + self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return wrap_impl(space, self, ret) + return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -299,7 +301,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return wrap_impl(space, self, new_impl) + return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -464,7 +466,7 @@ return W_NDimArray.new_scalar(space, dtype, impl.value) else: new_impl = impl.astype(space, dtype) - return wrap_impl(space, self, new_impl) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -664,7 +666,8 @@ "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize v = impl.get_view(self, dtype, new_shape) - return wrap_impl(space, w_type, v) + w_ret = wrap_impl(space, w_type, self, v) + return w_ret # --------------------- operations ---------------------------- diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -8,7 +8,7 @@ cls.w_NoNew = cls.space.appexec([], '''(): from numpypy import ndarray class NoNew(ndarray): - def __new__(cls): + def __new__(cls, subtype): raise ValueError('should not call __new__') def __array_finalize(self, obj): self.called_finalize = True @@ -36,15 +36,17 @@ def __array_finalize__(self, obj): if obj is None: - print 'finazlize with None' + print 'finalize with None' return - 
print 'finalize with something' + # printing the object itself will crash the test + print 'finalize with something',type(obj) self.info = getattr(obj, 'info', None) obj = InfoArray(shape=(3,)) assert isinstance(obj, InfoArray) assert obj.info is None obj = InfoArray(shape=(3,), info='information') assert obj.info == 'information' + print 'a' v = obj[1:] assert isinstance(v, InfoArray) assert v.base is obj @@ -62,19 +64,19 @@ v = a.view(self.NoNew) assert False - def test_repeat(self): + def test_sub_repeat(self): assert False - def test_flatiter(self): + def test_sub_flatiter(self): assert False - def test_getitem_filter(self): + def test_sub_getitem_filter(self): assert False - def test_getitem_array_int(self): + def test_sub_getitem_array_int(self): assert False - def test_round(self): + def test_sub_round(self): from numpypy import array a = array(range(10), dtype=float).view(self.NoNew) # numpy compatibility @@ -85,24 +87,28 @@ b = a.round(decimal=-1) assert not isinstance(b, self.NoNew) - def test_dot(self): + def test_sub_dot(self): # the returned type is that of the first argument assert False - def test_reduce(self): + def test_sub_reduce(self): # i.e. sum, max # test for out as well assert False - def test_call2(self): + def test_sub_call2(self): # c + a vs. a + c, what about array priority? assert False - def test_call1(self): + def test_sub_call1(self): assert False - def test_astype(self): + def test_sub_astype(self): assert False - def test_reshape(self): - assert False + def test_sub_reshape(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.reshape(3, 4) + assert b.called_finalize == True + From noreply at buildbot.pypy.org Fri Jul 5 15:10:55 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 15:10:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: try to satisfy tests.. Message-ID: <20130705131055.D86FC1C01C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65199:fa4f9e6e4a11 Date: 2013-07-05 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/fa4f9e6e4a11/ Log: try to satisfy tests.. 
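The InfoArray test above leans on NumPy's subclassing protocol: __array_finalize__ receives None for explicit construction, the source array for view casting, and the parent array for new-from-template operations such as slicing. With stock NumPy, used here purely to illustrate the behaviour these tests expect of numpypy, the three cases can be checked like this (class name invented):

    import numpy as np

    class Logged(np.ndarray):
        def __array_finalize__(self, obj):
            # obj is None for explicit construction, the source object otherwise
            self.seen = type(obj).__name__

    a = Logged((3,))                  # explicit construction
    assert a.seen == 'NoneType'
    b = np.arange(6).view(Logged)     # view casting from a plain ndarray
    assert b.seen == 'ndarray'
    c = b[1:4]                        # new-from-template (slicing)
    assert c.seen == 'Logged'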
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -92,7 +92,7 @@ self._build_wb_slowpath(False, withfloats=True) self._build_wb_slowpath(True, withfloats=True) self._build_propagate_exception_path() - if gc_ll_descr.get_malloc_slowpath_addr is not None: + if gc_ll_descr.get_malloc_slowpath_addr() is not None: # generate few slowpaths for various cases self.malloc_slowpath = self._build_malloc_slowpath(kind='fixed') self.malloc_slowpath_varsize = self._build_malloc_slowpath( diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -241,6 +241,9 @@ return self.malloc_array(arraydescr.basesize, num_elem, arraydescr.itemsize, arraydescr.lendescr.offset) + + def get_malloc_slowpath_addr(self): + return None # ____________________________________________________________ # All code below is for the hybrid or minimark GC From noreply at buildbot.pypy.org Fri Jul 5 15:11:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 15:11:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: extract stm barriers from normal write barrier Message-ID: <20130705131100.2604E1C01C0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65200:f2a6fd3f14de Date: 2013-07-05 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/f2a6fd3f14de/ Log: extract stm barriers from normal write barrier diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -270,37 +270,24 @@ rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) return rffi.cast(lltype.Signed, rst_addr) + class WriteBarrierDescr(AbstractDescr): - def __init__(self, gc_ll_descr, stmcat=None): + def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 - self.stmcat = stmcat - self.returns_modified_object = (stmcat is not None) - if not self.returns_modified_object: - self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address], lltype.Void)) - else: - self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( - [llmemory.Address], llmemory.Address)) + + self.returns_modified_object = False + self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address], lltype.Void)) + self.fielddescr_tid = gc_ll_descr.fielddescr_tid self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR # - if self.stmcat is not None: - cfunc_name = self.stmcat[2] - self.wb_failing_case_ptr = rffi.llexternal( - cfunc_name, - self.WB_FUNCPTR_MOD.TO.ARGS, - self.WB_FUNCPTR_MOD.TO.RESULT, - sandboxsafe=True, - _nowrapper=True) - # GCClass = gc_ll_descr.GCClass if GCClass is None: # for tests return - if self.stmcat is None: - self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG - else: - self.jit_wb_if_flag = self.stmcat[0] + + self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) # @@ -320,11 +307,7 @@ self.wb_slowpath = [0, 0, 0, 0] def repr_of_descr(self): - if self.stmcat is None: - return 'wbdescr' - else: - cat = self.stmcat[1] - return cat + return 'wbdescr' def __repr__(self): return '' % (self.repr_of_descr(),) @@ -395,7 +378,41 @@ if returns_modified_object: return gcref_struct +class STMBarrierDescr(WriteBarrierDescr): + def 
__init__(self, gc_ll_descr, stmcat, cfunc_name): + WriteBarrierDescr.__init__(self, gc_ll_descr) + self.stmcat = stmcat + self.returns_modified_object = True + self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + self.wb_failing_case_ptr = rffi.llexternal( + cfunc_name, + self.WB_FUNCPTR_MOD.TO.ARGS, + self.WB_FUNCPTR_MOD.TO.RESULT, + sandboxsafe=True, + _nowrapper=True) + + def repr_of_descr(self): + cat = self.stmcat + return cat + + def _do_barrier(self, gcref_struct, returns_modified_object): + raise NotImplemented + +class STMReadBarrierDescr(STMBarrierDescr): + def __init__(self, gc_ll_descr, stmcat): + assert stmcat == 'P2R' + STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, + 'stm_DirectReadBarrier') + +class STMWriteBarrierDescr(STMBarrierDescr): + def __init__(self, gc_ll_descr, stmcat): + assert stmcat in ['P2W', 'R2W'] + STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, + 'stm_WriteBarrier') + + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py kind = 'framework' @@ -494,14 +511,9 @@ self.do_write_barrier = do_write_barrier def _setup_barriers_for_stm(self): - from rpython.memory.gc import stmgc - WBDescr = WriteBarrierDescr - self.P2Rdescr = WBDescr(self, (0, 'P2R', - 'stm_DirectReadBarrier')) - self.P2Wdescr = WBDescr(self, (0, 'P2W', - 'stm_WriteBarrier')) - self.R2Wdescr = WBDescr(self, (0, 'R2W', - 'stm_WriteBarrierFromReady')) + self.P2Rdescr = STMReadBarrierDescr(self, 'P2R') + self.P2Wdescr = STMWriteBarrierDescr(self, 'P2W') + self.R2Wdescr = STMWriteBarrierDescr(self, 'R2W') self.write_barrier_descr = "wbdescr: do not use" # @specialize.argtype(0) From noreply at buildbot.pypy.org Fri Jul 5 15:37:22 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 5 Jul 2013 15:37:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: discontinue the support for 16 bit display depth, because of their color-format Message-ID: <20130705133722.3792B1C0E44@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r487:a2e7f25a1987 Date: 2013-07-05 15:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a2e7f25a1987/ Log: discontinue the support for 16 bit display depth, because of their color-format diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -950,8 +950,8 @@ def create(space, w_class, size, depth, display): if depth == 1: return W_DisplayBitmap1Bit(space, w_class, size, depth, display) - elif depth == 16: - return W_DisplayBitmap32Bit(space, w_class, size, depth, display) + # elif depth == 16: + # return W_DisplayBitmap32Bit(space, w_class, size, depth, display) elif depth == 32: return W_DisplayBitmap32Bit(space, w_class, size, depth, display) else: @@ -1022,6 +1022,8 @@ mask >>= 1 pos += 1 +# XXX: We stop supporting 16 bit displays, because the 16bit are with 5bit per +# color channel class W_DisplayBitmap16Bit(W_DisplayBitmap): @jit.unroll_safe def setword(self, n, word): diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -55,7 +55,7 @@ from spyvm import objspace space = objspace.ObjSpace() - + image = create_testimage(space) interp = interpreter.Interpreter(space, image) w_selector = interp.perform(space.wrap_string('loopTest2'), "asSymbol") From noreply at buildbot.pypy.org Fri Jul 5 15:37:23 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 5 Jul 2013 15:37:23 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed 
boundary errors with display bitmaps: Message-ID: <20130705133723.546A51C0E44@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r488:d185c4603165 Date: 2013-07-05 15:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d185c4603165/ Log: fixed boundary errors with display bitmaps: every line starts with a round word, therefore the empty space is at the end of every lines last word. The effect is that we need to recalculate the offset of our bloated pixel values and eventually need to stop copying to the diplay buffer prematurely diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1008,14 +1008,30 @@ def __del__(self): lltype.free(self._real_depth_buffer, flavor='raw') + @jit.elidable + def compute_pos_and_line_end(self, n, depth): + width = self.display.width + words_per_line = width / (NATIVE_DEPTH / depth) + if width % (NATIVE_DEPTH / depth) != 0: + words_per_line += 1 + line = n / words_per_line + assert line < self.display.height # line is 0 based + line_start = width * line + line_end = line_start + width # actually the start of the next line + pos = ((n % words_per_line) * (NATIVE_DEPTH / depth)) + line_start + return pos, line_end + + class W_DisplayBitmap1Bit(W_DisplayBitmap): @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - pos = n * NATIVE_DEPTH + pos, line_end = self.compute_pos_and_line_end(n, 1) mask = r_uint(1) mask <<= 31 for i in xrange(32): + if pos == line_end: + return bit = mask & word pixel = r_uint((0x00ffffff * (bit == 0)) | r_uint(0xff000000)) self.pixelbuffer[pos] = pixel @@ -1028,7 +1044,7 @@ @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - pos = n * NATIVE_DEPTH / 16 + pos, line_end = self.compute_pos_and_line_end(n, 16) mask = 0xf for i in range(2): pixel = 0 @@ -1036,6 +1052,8 @@ pixel |= r_uint(word & mask << (8 * j + 4)) mask <<= 4 self.pixelbuffer[pos + i] = pixel + if pos + 1 == line_end: + return class W_DisplayBitmap32Bit(W_DisplayBitmap): @jit.unroll_safe From noreply at buildbot.pypy.org Fri Jul 5 15:48:53 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 5 Jul 2013 15:48:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added test for the offset computation for 1bit case Message-ID: <20130705134853.44AB31C05DF@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r489:2c23bb7db0f1 Date: 2013-07-05 15:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2c23bb7db0f1/ Log: added test for the offset computation for 1bit case diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -350,7 +350,6 @@ assert target.getword(0) == 0xffff0100 assert target.getword(1) == 0x7fff8000 - at py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_display_bitmap(): # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent # double-free bug @@ -359,7 +358,7 @@ return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer d = display.SDLDisplay("test") - d.set_video_mode(10, 10, 1) + d.set_video_mode(32, 10, 1) target = model.W_DisplayBitmap.create(space, space.w_Array, 100, 1, d) target.setword(0, r_uint(0xFF00)) @@ -377,6 +376,20 @@ for i in xrange(24, 32): assert target.pixelbuffer[i] == 0xffffffff +def test_display_offset_computation(): + + def get_pixelbuffer(self): + from rpython.rtyper.lltypesystem import lltype, rffi + return 
lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') + display.SDLDisplay.get_pixelbuffer = get_pixelbuffer + d = display.SDLDisplay("test") + d.set_video_mode(18, 5, 1) + + dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) + + assert dbitmap.compute_pos_and_line_end(0, 1) == (0, 18) + assert dbitmap.compute_pos_and_line_end(1, 1) == (18, 36) + assert dbitmap.size() == 5 @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): From noreply at buildbot.pypy.org Fri Jul 5 15:56:15 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 5 Jul 2013 15:56:15 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed offset problems for display bitmap test values Message-ID: <20130705135615.729931C05DF@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r490:f7ddeabde122 Date: 2013-07-05 15:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f7ddeabde122/ Log: fixed offset problems for display bitmap test values diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -360,7 +360,7 @@ d = display.SDLDisplay("test") d.set_video_mode(32, 10, 1) - target = model.W_DisplayBitmap.create(space, space.w_Array, 100, 1, d) + target = model.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) target.setword(0, r_uint(0xFF00)) assert bin(target.getword(0)) == bin(0xFF00) target.setword(0, r_uint(0x00FF00FF)) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -702,7 +702,7 @@ assert space.objtable["w_display"] is None mock_display = model.W_PointersObject(space, space.w_Point, 4) - w_wordbmp = model.W_WordsObject(space, space.w_Array, 100) + w_wordbmp = model.W_WordsObject(space, space.w_Array, 10) mock_display.store(space, 0, w_wordbmp) # bitmap mock_display.store(space, 1, space.wrap_int(32)) # width mock_display.store(space, 2, space.wrap_int(10)) # height @@ -716,7 +716,7 @@ assert isinstance(sdldisplay, display.SDLDisplay) mock_display2 = model.W_PointersObject(space, space.w_Point, 4) - mock_display2.store(space, 0, model.W_WordsObject(space, space.w_Array, 100)) # bitmap + mock_display2.store(space, 0, model.W_WordsObject(space, space.w_Array, 10)) # bitmap mock_display2.store(space, 1, space.wrap_int(32)) # width mock_display2.store(space, 2, space.wrap_int(10)) # height mock_display2.store(space, 3, space.wrap_int(1)) # depth @@ -741,7 +741,7 @@ display.SDLDisplay.get_pixelbuffer = get_pixelbuffer mock_display = model.W_PointersObject(space, space.w_Point, 4) - w_wordbmp = model.W_WordsObject(space, space.w_Array, 100) + w_wordbmp = model.W_WordsObject(space, space.w_Array, 10) mock_display.store(space, 0, w_wordbmp) # bitmap mock_display.store(space, 1, space.wrap_int(32)) # width mock_display.store(space, 2, space.wrap_int(10)) # height From noreply at buildbot.pypy.org Fri Jul 5 16:58:46 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 5 Jul 2013 16:58:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: no fastpath for gc write/read barriers Message-ID: <20130705145846.36FC51C0E1C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65201:43aaaf023f46 Date: 2013-07-05 16:58 +0200 http://bitbucket.org/pypy/pypy/changeset/43aaaf023f46/ Log: no fastpath for gc write/read barriers diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- 
a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -362,7 +362,7 @@ self.wb_slowpath[withcards + 2 * withfloats] = addr @specialize.arg(2) - def _do_write_barrier(self, gcref_struct, returns_modified_object): + def _do_barrier(self, gcref_struct, returns_modified_object): assert self.returns_modified_object == returns_modified_object hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) hdr_addr -= self.gcheaderbuilder.size_gc_header @@ -397,20 +397,29 @@ cat = self.stmcat return cat + @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): - raise NotImplemented + assert self.returns_modified_object == returns_modified_object + # XXX: fastpath for Read and Write variants + funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) + if returns_modified_object: + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) + class STMReadBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_DirectReadBarrier') + 'stm_read_barrier') + + class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W', 'R2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_WriteBarrier') + 'stm_write_barrier') class GcLLDescr_framework(GcLLDescription): @@ -507,7 +516,7 @@ else: self.write_barrier_descr = WriteBarrierDescr(self) def do_write_barrier(gcref_struct, gcref_newptr): - self.write_barrier_descr._do_write_barrier(gcref_struct, False) + self.write_barrier_descr._do_barrier(gcref_struct, False) self.do_write_barrier = do_write_barrier def _setup_barriers_for_stm(self): @@ -524,7 +533,7 @@ descr = self.P2Wdescr else: descr = self.P2Rdescr - return descr._do_write_barrier(gcref, True) + return descr._do_barrier(gcref, True) self.do_stm_barrier = do_stm_barrier def _make_functions(self, really_not_translated): From noreply at buildbot.pypy.org Fri Jul 5 17:04:00 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 5 Jul 2013 17:04:00 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fill out tests, try to be consistent about w_ naming Message-ID: <20130705150400.6B4161C0E1C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-subtype Changeset: r65202:02610cc6e8f6 Date: 2013-07-05 17:58 +0300 http://bitbucket.org/pypy/pypy/changeset/02610cc6e8f6/ Log: fill out tests, try to be consistent about w_ naming diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -31,7 +31,7 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', subtype=None, is_new=False): + def from_shape(space, shape, dtype, order='C', w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -40,33 +40,17 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) - if subtype: - if space.isinstance_w(subtype, space.w_type): - #got type, either from __new__ or from view casting - ret = space.allocate_instance(W_NDimArray, subtype) - W_NDimArray.__init__(ret, impl) - if is_new: - space.call_function(space.getattr(ret, - space.wrap('__array_finalize__')), - space.w_None) - else: - # view casting, call finalize - space.call_function(space.getattr(ret, - space.wrap('__array_finalize__')), - subtype) - 
else: - #got instance - ret = space.allocate_instance(W_NDimArray, space.type(subtype)) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, - space.wrap('__array_finalize__')), - subtype) + if w_subtype: + ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) + W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), w_subtype) else: ret = W_NDimArray(impl) return ret @staticmethod - def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, subtype=None): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -77,21 +61,11 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) - if subtype: - if space.isinstance_w(subtype, space.w_type): - #got type, probably from descr_XXX - ret = space.allocate_instance(W_NDimArray, subtype) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, - space.wrap('__array_finalize__')), - space.w_None) - else: - #got instance - ret = space.allocate_instance(W_NDimArray, space.type(subtype)) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, - space.wrap('__array_finalize__')), - subtype) + if w_subtype: + ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) + W_NDimArray.__init__(ret, impl) + space.call_function(space.getattr(ret, + space.wrap('__array_finalize__')), w_subtype) return ret return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(space, shape, dtype, subtype=arr) + out = W_NDimArray.from_shape(space, shape, dtype, w_subtype=arr) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -148,25 +148,25 @@ def repeat(space, w_arr, repeats, w_axis): arr = convert_to_array(space, w_arr) if space.is_none(w_axis): - arr = arr.descr_flatten(space) - orig_size = arr.get_shape()[0] - shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=arr) + w_arr = arr.descr_flatten(space) + orig_size = w_arr.get_shape()[0] + shape = [w_arr.get_shape()[0] * repeats] + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=w_arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(space, res).implementation.setslice(space, arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, w_arr) else: axis = space.int_w(w_axis) - shape = arr.get_shape()[:] + shape = w_arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=arr) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=w_arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(space, res).implementation.setslice(space, arr) - return res + Chunks(chunks).apply(space, w_res).implementation.setslice(space, w_arr) + return w_res def count_nonzero(space, w_obj): return 
space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,8 +85,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), subtype=self) - return loop.getitem_filter(res, self, arr) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_subtype=self) + return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): @@ -481,9 +481,9 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), subtype=self) - loop.byteswap(self.implementation, res.implementation) - return res + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_subtype=self) + loop.byteswap(self.implementation, w_res.implementation) + return w_res @unwrap_spec(mode=str) def descr_choose(self, space, w_choices, w_out=None, mode='raise'): @@ -775,9 +775,9 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(space, out_shape, dtype, subtype=self) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_subtype=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, result, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) @unwrap_spec(w_axis = WrappedDefault(None)) @@ -921,8 +921,8 @@ return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) - ret = W_NDimArray.from_shape(space, shape, dtype, order, w_subtype, is_new=True) - return ret + raise OperationError(space.w_TypeError, space.wrap( + "__new__ is not meant to be called except with a ndarray")) @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -208,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(space, shape, dtype, subtype=obj) + out = W_NDimArray.from_shape(space, shape, dtype, w_subtype=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -217,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, subtype=obj) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_subtype=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -20,8 +20,9 @@ 'left_iter', 'right_iter', 'out_iter']) def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): + # handle array_priority if out is None: - out = W_NDimArray.from_shape(space, shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, 
w_subtype=w_lhs) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -50,7 +51,7 @@ def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(space, shape, res_dtype, subtype=w_obj) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_subtype=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -10,16 +10,18 @@ class NoNew(ndarray): def __new__(cls, subtype): raise ValueError('should not call __new__') - def __array_finalize(self, obj): + def __array_finalize__(self, obj): + self.called_finalize = True return NoNew ''') cls.w_SubType = cls.space.appexec([], '''(): - from numpypy import ndarray + from numpypy import ndarray, asarray class SubType(ndarray): - def __new__(cls): - cls.called_new = True - return cls - def __array_finalize(self, obj): + def __new__(obj, input_array): + obj = asarray(input_array).view(obj) + obj.called_new = True + return obj + def __array_finalize__(self, obj): self.called_finalize = True return SubType ''') @@ -98,13 +100,25 @@ def test_sub_call2(self): # c + a vs. a + c, what about array priority? - assert False + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = self.SubType(range(12)) + c = b + a + assert isinstance(c, self.SubType) + c = a + b + assert isinstance(c, self.NoNew) def test_sub_call1(self): - assert False + from numpypy import array, sqrt + a = array(range(12)).view(self.NoNew) + b = sqrt(a) + assert b.called_finalize == True def test_sub_astype(self): - assert False + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.astype(float) + assert b.called_finalize == True def test_sub_reshape(self): from numpypy import array From noreply at buildbot.pypy.org Fri Jul 5 17:04:01 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 5 Jul 2013 17:04:01 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: one more w_ Message-ID: <20130705150401.B6F691C0E1C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-subtype Changeset: r65203:710c211abd6e Date: 2013-07-05 18:03 +0300 http://bitbucket.org/pypy/pypy/changeset/710c211abd6e/ Log: one more w_ diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -161,7 +161,7 @@ chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), subtype=w_arr) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=w_arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) From noreply at buildbot.pypy.org Fri Jul 5 19:41:39 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:39 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Simplify flowspace op creation Message-ID: <20130705174140.003201C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65204:d8b730db4171 Date: 2013-04-28 13:37 +0100 http://bitbucket.org/pypy/pypy/changeset/d8b730db4171/ Log: Simplify flowspace op creation diff --git a/rpython/flowspace/objspace.py 
b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -462,11 +462,15 @@ raise FlowingError(self.frame, self.wrap(message)) return self.wrap(value) +def make_impure_op(name, arity): + def generic_operator(self, *args_w): + assert len(args_w) == arity, name + " got the wrong number of arguments" + w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) + return w_result + return generic_operator + def make_op(name, arity): """Add function operation to the flow space.""" - if getattr(FlowObjSpace, name, None) is not None: - return - op = None skip = False arithmetic = False @@ -474,11 +478,9 @@ if (name.startswith('del') or name.startswith('set') or name.startswith('inplace_')): - # skip potential mutators - skip = True + return make_impure_op(name, arity) elif name in ('id', 'hash', 'iter', 'userdel'): - # skip potential runtime context dependecies - skip = True + return make_impure_op(name, arity) elif name in ('repr', 'str'): rep = getattr(__builtin__, name) def op(obj): @@ -490,54 +492,50 @@ op = operation.FunctionByName[name] arithmetic = (name + '_ovf') in operation.FunctionByName - if not op and not skip: - raise ValueError("XXX missing operator: %s" % (name,)) - def generic_operator(self, *args_w): assert len(args_w) == arity, name + " got the wrong number of arguments" - if op: - args = [] - for w_arg in args_w: - try: - arg = self.unwrap_for_computation(w_arg) - except UnwrapException: - break + args = [] + for w_arg in args_w: + try: + arg = self.unwrap_for_computation(w_arg) + except UnwrapException: + break + else: + args.append(arg) + else: + # All arguments are constants: call the operator now + try: + result = op(*args) + except Exception, e: + etype = e.__class__ + msg = "%s%r always raises %s: %s" % ( + name, tuple(args), etype, e) + raise FlowingError(self.frame, msg) + else: + # don't try to constant-fold operations giving a 'long' + # result. The result is probably meant to be sent to + # an intmask(), but the 'long' constant confuses the + # annotator a lot. + if arithmetic and type(result) is long: + pass + # don't constant-fold getslice on lists, either + elif name == 'getslice' and type(result) is list: + pass + # otherwise, fine else: - args.append(arg) - else: - # All arguments are constants: call the operator now - try: - result = op(*args) - except Exception, e: - etype = e.__class__ - msg = "%s%r always raises %s: %s" % ( - name, tuple(args), etype, e) - raise FlowingError(self.frame, msg) - else: - # don't try to constant-fold operations giving a 'long' - # result. The result is probably meant to be sent to - # an intmask(), but the 'long' constant confuses the - # annotator a lot. 
- if arithmetic and type(result) is long: + try: + return self.wrap(result) + except WrapException: + # type cannot sanely appear in flow graph, + # store operation with variable result instead pass - # don't constant-fold getslice on lists, either - elif name == 'getslice' and type(result) is list: - pass - # otherwise, fine - else: - try: - return self.wrap(result) - except WrapException: - # type cannot sanely appear in flow graph, - # store operation with variable result instead - pass w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) return w_result - - setattr(FlowObjSpace, name, generic_operator) + return generic_operator for (name, symbol, arity, specialnames) in operation.MethodTable: - make_op(name, arity) + if getattr(FlowObjSpace, name, None) is None: + setattr(FlowObjSpace, name, make_op(name, arity)) def build_flow(func, space=FlowObjSpace()): From noreply at buildbot.pypy.org Fri Jul 5 19:41:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:41 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Replace unwrap_for_computation() with Constant.foldable() Message-ID: <20130705174141.64CC71C0E1C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65205:849a055fabb7 Date: 2013-05-01 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/849a055fabb7/ Log: Replace unwrap_for_computation() with Constant.foldable() diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -3,6 +3,7 @@ # # the below object/attribute model evolved from # a discussion in Berlin, 4th of october 2003 +import types import py from rpython.tool.uid import uid, Hashable @@ -261,6 +262,7 @@ dummyname = 'v' namesdict = {dummyname : (dummyname, 0)} + @property def name(self): _name = self._name _nr = self._nr @@ -270,11 +272,10 @@ _nr = self._nr = nd[_name][1] nd[_name] = (_name, _nr + 1) return "%s%d" % (_name, _nr) - name = property(name) + @property def renamed(self): return self._name is not self.dummyname - renamed = property(renamed) def __init__(self, name=None): self._name = self.dummyname @@ -314,6 +315,9 @@ self._name = intern(name) self._nr = nr + def foldable(self): + return False + class Constant(Hashable): __slots__ = ["concretetype"] @@ -323,6 +327,25 @@ if concretetype is not None: self.concretetype = concretetype + def foldable(self): + to_check = self.value + if hasattr(to_check, 'im_self'): + to_check = to_check.im_self + if isinstance(to_check, (type, types.ClassType, types.ModuleType)): + # classes/types/modules are assumed immutable + return True + if (hasattr(to_check, '__class__') and + to_check.__class__.__module__ == '__builtin__'): + # builtin object + return True + # User-created instance + if hasattr(to_check, '_freeze_'): + assert to_check._freeze_() is True + return True + else: + # cannot count on it not mutating at runtime! 
+ return False + class UnwrapException(Exception): """Attempted to unwrap a Variable.""" diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -176,22 +176,6 @@ else: raise TypeError("not wrapped: " + repr(w_obj)) - def unwrap_for_computation(self, w_obj): - obj = self.unwrap(w_obj) - to_check = obj - if hasattr(to_check, 'im_self'): - to_check = to_check.im_self - if (not isinstance(to_check, (type, types.ClassType, types.ModuleType)) and - # classes/types/modules are assumed immutable - hasattr(to_check, '__class__') and to_check.__class__.__module__ != '__builtin__'): - frozen = hasattr(to_check, '_freeze_') - if frozen: - assert to_check._freeze_() is True - else: - # cannot count on it not mutating at runtime! - raise UnwrapException - return obj - def exception_issubclass_w(self, w_cls1, w_cls2): return self.is_true(self.issubtype(w_cls1, w_cls2)) @@ -291,12 +275,8 @@ return self.wrap(not self.is_true(w_obj)) def is_true(self, w_obj): - try: - obj = self.unwrap_for_computation(w_obj) - except UnwrapException: - pass - else: - return bool(obj) + if w_obj.foldable(): + return bool(w_obj.value) w_truthvalue = self.frame.do_operation('is_true', w_obj) return self.frame.guessbool(w_truthvalue) @@ -343,12 +323,8 @@ if w_name not in const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', w_obj, w_name) - try: - obj = self.unwrap_for_computation(w_obj) - name = self.unwrap_for_computation(w_name) - except UnwrapException: - pass - else: + if w_obj.foldable() and w_name.foldable(): + obj, name = w_obj.value, w_name.value try: result = getattr(obj, name) except Exception, e: @@ -495,14 +471,8 @@ def generic_operator(self, *args_w): assert len(args_w) == arity, name + " got the wrong number of arguments" args = [] - for w_arg in args_w: - try: - arg = self.unwrap_for_computation(w_arg) - except UnwrapException: - break - else: - args.append(arg) - else: + if all(w_arg.foldable() for w_arg in args_w): + args = [w_arg.value for w_arg in args_w] # All arguments are constants: call the operator now try: result = op(*args) From noreply at buildbot.pypy.org Fri Jul 5 19:41:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:42 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Clean up exc_from_raise() Message-ID: <20130705174142.8C7201C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65206:4b6ab8133315 Date: 2013-05-02 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/4b6ab8133315/ Log: Clean up exc_from_raise() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -216,36 +216,32 @@ return True return False - def exc_from_raise(self, w_type, w_value): + def exc_from_raise(self, w_arg1, w_arg2): """ Create a wrapped exception from the arguments of a raise statement. Returns an FSException object whose w_value is an instance of w_type. 
""" - if self.isinstance_w(w_type, self.w_type): + if self.isinstance_w(w_arg1, self.w_type): # this is for all cases of the form (Class, something) - if self.is_w(w_value, self.w_None): + if self.is_w(w_arg2, self.w_None): # raise Type: we assume we have to instantiate Type - w_value = self.call_function(w_type) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1) else: - w_valuetype = self.type(w_value) - if self.exception_issubclass_w(w_valuetype, w_type): + w_valuetype = self.type(w_arg2) + if self.exception_issubclass_w(w_valuetype, w_arg1): # raise Type, Instance: let etype be the exact type of value - w_type = w_valuetype + w_value = w_arg2 else: # raise Type, X: assume X is the constructor argument - w_value = self.call_function(w_type, w_value) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1, w_arg2) else: # the only case left here is (inst, None), from a 'raise inst'. - w_inst = w_type - w_instclass = self.type(w_inst) - if not self.is_w(w_value, self.w_None): + if not self.is_w(w_arg2, self.w_None): raise FSException(self.w_TypeError, self.wrap( "instance exception may not have a separate value")) - w_value = w_inst - w_type = w_instclass + w_value = w_arg1 + w_type = self.type(w_value) return FSException(w_type, w_value) def unpackiterable(self, w_iterable): From noreply at buildbot.pypy.org Fri Jul 5 19:41:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:43 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Create exc_wrap() Message-ID: <20130705174143.B550C1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65207:684ea56ca2c5 Date: 2013-05-02 18:08 +0100 http://bitbucket.org/pypy/pypy/changeset/684ea56ca2c5/ Log: Create exc_wrap() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -506,7 +506,7 @@ self.recorder.crnt_block.closeblock(link) except FSException, e: - if e.w_type is self.space.w_ImportError: + if e.w_type == self.space.w_ImportError: msg = 'import statement always raises %s' % e raise ImportError(msg) link = Link([e.w_type, e.w_value], graph.exceptblock) @@ -661,8 +661,8 @@ self.last_exception = operr raise operr else: - raise FSException(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) + raise space.exc_wrap(TypeError( + "raise: no active exception to re-raise")) w_value = space.w_None if nbargs >= 3: diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -135,6 +135,11 @@ raise WrapException return Constant(obj) + def exc_wrap(self, exc): + w_value = self.wrap(exc) + w_type = self.wrap(type(exc)) + return FSException(w_type, w_value) + def int_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -238,7 +243,7 @@ else: # the only case left here is (inst, None), from a 'raise inst'. 
if not self.is_w(w_arg2, self.w_None): - raise FSException(self.w_TypeError, self.wrap( + raise self.exc_wrap(TypeError( "instance exception may not have a separate value")) w_value = w_arg1 w_type = self.type(w_value) @@ -292,7 +297,7 @@ try: v, next_unroller = it.step() except IndexError: - raise FSException(self.w_StopIteration, self.w_None) + raise self.exc_wrap(StopIteration()) else: frame.replace_in_stack(it, next_unroller) return self.wrap(v) @@ -341,8 +346,8 @@ def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: mod = __import__(name, glob, loc, frm, level) - except ImportError, e: - raise FSException(self.w_ImportError, self.wrap(str(e))) + except ImportError as e: + raise self.exc_wrap(e) return self.wrap(mod) def import_from(self, w_module, w_name): @@ -357,8 +362,8 @@ try: return self.wrap(getattr(w_module.value, w_name.value)) except AttributeError: - raise FSException(self.w_ImportError, - self.wrap("cannot import name '%s'" % w_name.value)) + raise self.exc_wrap(ImportError( + "cannot import name '%s'" % w_name.value)) def call_method(self, w_obj, methname, *arg_w): w_meth = self.getattr(w_obj, self.wrap(methname)) From noreply at buildbot.pypy.org Fri Jul 5 19:41:44 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:44 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Remove unnecessary guard Message-ID: <20130705174144.E00971C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65208:21a13020a015 Date: 2013-05-12 15:17 +0100 http://bitbucket.org/pypy/pypy/changeset/21a13020a015/ Log: Remove unnecessary guard The special case in space.setitem was never actually executed, since frame.w_globals is never modified directly and other ways of accessing globals() don't return the frame.w_globals object. diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -305,14 +305,6 @@ frame.handle_implicit_exceptions([StopIteration, RuntimeError]) return w_item - def setitem(self, w_obj, w_key, w_val): - # protect us from globals write access - if w_obj is self.frame.w_globals: - raise FlowingError(self.frame, - "Attempting to modify global variable %r." 
% (w_key)) - return self.frame.do_operation_with_implicit_exceptions('setitem', - w_obj, w_key, w_val) - def setitem_str(self, w_obj, key, w_value): return self.setitem(w_obj, self.wrap(key), w_value) From noreply at buildbot.pypy.org Fri Jul 5 19:41:46 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:46 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: kill dead code Message-ID: <20130705174146.0A9FC1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65209:d3e67403a298 Date: 2013-05-05 19:18 +0100 http://bitbucket.org/pypy/pypy/changeset/d3e67403a298/ Log: kill dead code diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -148,15 +148,6 @@ return val return self.unwrap(w_obj) - def uint_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not rarithmetic.r_uint: - raise TypeError("expected unsigned: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - - def str_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -165,14 +156,6 @@ return val return self.unwrap(w_obj) - def float_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not float: - raise TypeError("expected float: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - def unwrap(self, w_obj): if isinstance(w_obj, Variable): raise UnwrapException @@ -305,8 +288,6 @@ frame.handle_implicit_exceptions([StopIteration, RuntimeError]) return w_item - def setitem_str(self, w_obj, key, w_value): - return self.setitem(w_obj, self.wrap(key), w_value) def getattr(self, w_obj, w_name): # handling special things like sys From noreply at buildbot.pypy.org Fri Jul 5 19:41:47 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:47 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Extract record_block() from FSFrame.build_flow() Message-ID: <20130705174147.3571F1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65210:4225f5c299d2 Date: 2013-04-28 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/4225f5c299d2/ Log: Extract record_block() from FSFrame.build_flow() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -488,39 +488,42 @@ self.pendingblocks = collections.deque([graph.startblock]) while self.pendingblocks: block = self.pendingblocks.popleft() - try: - self.recorder = self.recording(block) - while True: - self.last_instr = self.handle_bytecode(self.last_instr) - self.recorder.final_state = self.getstate() + self.record_block(block) - except ImplicitOperationError, e: - if isinstance(e.w_type, Constant): - exc_cls = e.w_type.value - else: - exc_cls = Exception - msg = "implicit %s shouldn't occur" % exc_cls.__name__ - w_type = Constant(AssertionError) - w_value = Constant(AssertionError(msg)) - link = Link([w_type, w_value], graph.exceptblock) - self.recorder.crnt_block.closeblock(link) + def record_block(self, block): + try: + self.recorder = self.recording(block) + while True: + self.last_instr = self.handle_bytecode(self.last_instr) + self.recorder.final_state = self.getstate() - except FSException, e: - if e.w_type == self.space.w_ImportError: - msg = 'import statement always raises %s' % e - raise ImportError(msg) - link = Link([e.w_type, e.w_value], graph.exceptblock) - 
self.recorder.crnt_block.closeblock(link) + except ImplicitOperationError, e: + if isinstance(e.w_type, Constant): + exc_cls = e.w_type.value + else: + exc_cls = Exception + msg = "implicit %s shouldn't occur" % exc_cls.__name__ + w_type = Constant(AssertionError) + w_value = Constant(AssertionError(msg)) + link = Link([w_type, w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except StopFlowing: - pass + except FSException, e: + if e.w_type == self.space.w_ImportError: + msg = 'import statement always raises %s' % e + raise ImportError(msg) + link = Link([e.w_type, e.w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except Return as exc: - w_result = exc.value - link = Link([w_result], graph.returnblock) - self.recorder.crnt_block.closeblock(link) + except StopFlowing: + pass - del self.recorder + except Return as exc: + w_result = exc.value + link = Link([w_result], self.graph.returnblock) + self.recorder.crnt_block.closeblock(link) + + self.recorder = None def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr From noreply at buildbot.pypy.org Fri Jul 5 19:41:48 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:48 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Simplify record_block() setup. Message-ID: <20130705174148.566D41C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65211:00d00e8529fb Date: 2013-05-02 04:12 +0100 http://bitbucket.org/pypy/pypy/changeset/00d00e8529fb/ Log: Simplify record_block() setup. Kill FSFrame.recording() and dispatch its logic to Block methods. diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -55,25 +55,44 @@ pass class SpamBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "dead framestate".split() - def __init__(self, framestate): Block.__init__(self, framestate.getvariables()) self.framestate = framestate self.dead = False + def make_recorder(self): + return BlockRecorder(self) + class EggBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "prevblock booloutcome last_exception".split() - def __init__(self, inputargs, prevblock, booloutcome): Block.__init__(self, inputargs) self.prevblock = prevblock self.booloutcome = booloutcome + @property + def ancestor(self): + parent = self.prevblock + while isinstance(parent, EggBlock): + parent = parent.prevblock + return parent + + @property + def dead(self): + return self.ancestor.dead + + @property + def framestate(self): + return self.ancestor.framestate + + def make_recorder(self): + recorder = BlockRecorder(self) + curr = self + while isinstance(curr, EggBlock): + prev = curr.prevblock + recorder = Replayer(prev, curr.booloutcome, recorder) + curr = prev + return recorder + def extravars(self, last_exception=None, last_exc_value=None): self.last_exception = last_exception @@ -430,24 +449,6 @@ self.last_instr = state.next_instr self.blockstack = state.blocklist[:] - def recording(self, block): - """ Setup recording of the block and return the recorder. 
""" - parentblocks = [] - parent = block - while isinstance(parent, EggBlock): - parent = parent.prevblock - parentblocks.append(parent) - # parentblocks = [Egg, Egg, ..., Egg, Spam] not including block - if parent.dead: - raise StopFlowing - self.setstate(parent.framestate) - recorder = BlockRecorder(block) - prevblock = block - for parent in parentblocks: - recorder = Replayer(parent, prevblock.booloutcome, recorder) - prevblock = parent - return recorder - def record(self, spaceop): """Record an operation into the active block""" recorder = self.recorder @@ -488,11 +489,13 @@ self.pendingblocks = collections.deque([graph.startblock]) while self.pendingblocks: block = self.pendingblocks.popleft() - self.record_block(block) + if not block.dead: + self.record_block(block) def record_block(self, block): + self.setstate(block.framestate) + self.recorder = block.make_recorder() try: - self.recorder = self.recording(block) while True: self.last_instr = self.handle_bytecode(self.last_instr) self.recorder.final_state = self.getstate() From noreply at buildbot.pypy.org Fri Jul 5 19:41:49 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:49 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Merge FSFrame.record() into .do_operation() Message-ID: <20130705174149.856821C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65212:2e08f401ff36 Date: 2013-05-02 04:14 +0100 http://bitbucket.org/pypy/pypy/changeset/2e08f401ff36/ Log: Merge FSFrame.record() into .do_operation() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -449,21 +449,17 @@ self.last_instr = state.next_instr self.blockstack = state.blocklist[:] - def record(self, spaceop): - """Record an operation into the active block""" + def guessbool(self, w_condition, **kwds): + return self.recorder.guessbool(self, w_condition, **kwds) + + def do_operation(self, name, *args_w): recorder = self.recorder if getattr(recorder, 'final_state', None) is not None: self.mergeblock(recorder.crnt_block, recorder.final_state) raise StopFlowing - recorder.append(spaceop) - - def guessbool(self, w_condition, **kwds): - return self.recorder.guessbool(self, w_condition, **kwds) - - def do_operation(self, name, *args_w): spaceop = SpaceOperation(name, args_w, Variable()) spaceop.offset = self.last_instr - self.record(spaceop) + recorder.append(spaceop) return spaceop.result def do_operation_with_implicit_exceptions(self, name, *args_w): From noreply at buildbot.pypy.org Fri Jul 5 19:41:50 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:50 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Represent flowspace operators as SpaceOperator objects Message-ID: <20130705174150.A48111C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65213:a765e4e1d8a7 Date: 2013-05-03 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/a765e4e1d8a7/ Log: Represent flowspace operators as SpaceOperator objects Kill operation.MethodTable diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -477,9 +477,9 @@ return w_result return generic_operator -for (name, symbol, arity, specialnames) in operation.MethodTable: - if getattr(FlowObjSpace, name, None) is None: - setattr(FlowObjSpace, name, make_op(name, arity)) +for oper in 
operation.op.__dict__.values(): + if getattr(FlowObjSpace, oper.name, None) is None: + setattr(FlowObjSpace, oper.name, make_op(oper.name, oper.arity)) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -9,87 +9,95 @@ from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck -# this is a copy that should be shared with standard objspace +class _OpHolder(object): pass +op = _OpHolder() -MethodTable = [ -# method name # symbol # number of arguments # special method name(s) - ('is_', 'is', 2, []), - ('id', 'id', 1, []), - ('type', 'type', 1, []), - ('isinstance', 'isinstance', 2, ['__instancecheck__']), - ('issubtype', 'issubtype', 2, ['__subclasscheck__']), # not for old-style classes - ('repr', 'repr', 1, ['__repr__']), - ('str', 'str', 1, ['__str__']), - ('format', 'format', 2, ['__format__']), - ('len', 'len', 1, ['__len__']), - ('hash', 'hash', 1, ['__hash__']), - ('getattr', 'getattr', 2, ['__getattribute__']), - ('setattr', 'setattr', 3, ['__setattr__']), - ('delattr', 'delattr', 2, ['__delattr__']), - ('getitem', 'getitem', 2, ['__getitem__']), - ('setitem', 'setitem', 3, ['__setitem__']), - ('delitem', 'delitem', 2, ['__delitem__']), - ('getslice', 'getslice', 3, ['__getslice__']), - ('setslice', 'setslice', 4, ['__setslice__']), - ('delslice', 'delslice', 3, ['__delslice__']), - ('trunc', 'trunc', 1, ['__trunc__']), - ('pos', 'pos', 1, ['__pos__']), - ('neg', 'neg', 1, ['__neg__']), - ('nonzero', 'truth', 1, ['__nonzero__']), - ('abs' , 'abs', 1, ['__abs__']), - ('hex', 'hex', 1, ['__hex__']), - ('oct', 'oct', 1, ['__oct__']), - ('ord', 'ord', 1, []), - ('invert', '~', 1, ['__invert__']), - ('add', '+', 2, ['__add__', '__radd__']), - ('sub', '-', 2, ['__sub__', '__rsub__']), - ('mul', '*', 2, ['__mul__', '__rmul__']), - ('truediv', '/', 2, ['__truediv__', '__rtruediv__']), - ('floordiv', '//', 2, ['__floordiv__', '__rfloordiv__']), - ('div', 'div', 2, ['__div__', '__rdiv__']), - ('mod', '%', 2, ['__mod__', '__rmod__']), - ('divmod', 'divmod', 2, ['__divmod__', '__rdivmod__']), - ('pow', '**', 3, ['__pow__', '__rpow__']), - ('lshift', '<<', 2, ['__lshift__', '__rlshift__']), - ('rshift', '>>', 2, ['__rshift__', '__rrshift__']), - ('and_', '&', 2, ['__and__', '__rand__']), - ('or_', '|', 2, ['__or__', '__ror__']), - ('xor', '^', 2, ['__xor__', '__rxor__']), - ('int', 'int', 1, ['__int__']), - ('index', 'index', 1, ['__index__']), - ('float', 'float', 1, ['__float__']), - ('long', 'long', 1, ['__long__']), - ('inplace_add', '+=', 2, ['__iadd__']), - ('inplace_sub', '-=', 2, ['__isub__']), - ('inplace_mul', '*=', 2, ['__imul__']), - ('inplace_truediv', '/=', 2, ['__itruediv__']), - ('inplace_floordiv','//=', 2, ['__ifloordiv__']), - ('inplace_div', 'div=', 2, ['__idiv__']), - ('inplace_mod', '%=', 2, ['__imod__']), - ('inplace_pow', '**=', 2, ['__ipow__']), - ('inplace_lshift', '<<=', 2, ['__ilshift__']), - ('inplace_rshift', '>>=', 2, ['__irshift__']), - ('inplace_and', '&=', 2, ['__iand__']), - ('inplace_or', '|=', 2, ['__ior__']), - ('inplace_xor', '^=', 2, ['__ixor__']), - ('lt', '<', 2, ['__lt__', '__gt__']), - ('le', '<=', 2, ['__le__', '__ge__']), - ('eq', '==', 2, ['__eq__', '__eq__']), - ('ne', '!=', 2, ['__ne__', '__ne__']), - ('gt', '>', 2, ['__gt__', '__lt__']), - ('ge', '>=', 2, ['__ge__', '__le__']), - ('cmp', 'cmp', 2, ['__cmp__']), # rich cmps preferred - ('coerce', 'coerce', 2, ['__coerce__', 
'__coerce__']), - ('contains', 'contains', 2, ['__contains__']), - ('iter', 'iter', 1, ['__iter__']), - ('next', 'next', 1, ['next']), -# ('call', 'call', 3, ['__call__']), - ('get', 'get', 3, ['__get__']), - ('set', 'set', 3, ['__set__']), - ('delete', 'delete', 2, ['__delete__']), - ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py - ] +class SpaceOperator(object): + def __init__(self, name, arity, symbol): + self.name = name + self.arity = arity + self.symbol = symbol + +def add_operator(name, arity, symbol): + operator = SpaceOperator(name, arity, symbol) + setattr(op, name, operator) + +add_operator('is_', 2, 'is') +add_operator('id', 1, 'id') +add_operator('type', 1, 'type') +add_operator('isinstance', 2, 'isinstance') +add_operator('issubtype', 2, 'issubtype') # not for old-style classes +add_operator('repr', 1, 'repr') +add_operator('str', 1, 'str') +add_operator('format', 2, 'format') +add_operator('len', 1, 'len') +add_operator('hash', 1, 'hash') +add_operator('getattr', 2, 'getattr') +add_operator('setattr', 3, 'setattr') +add_operator('delattr', 2, 'delattr') +add_operator('getitem', 2, 'getitem') +add_operator('setitem', 3, 'setitem') +add_operator('delitem', 2, 'delitem') +add_operator('getslice', 3, 'getslice') +add_operator('setslice', 4, 'setslice') +add_operator('delslice', 3, 'delslice') +add_operator('trunc', 1, 'trunc') +add_operator('pos', 1, 'pos') +add_operator('neg', 1, 'neg') +add_operator('nonzero', 1, 'truth') +add_operator('abs' , 1, 'abs') +add_operator('hex', 1, 'hex') +add_operator('oct', 1, 'oct') +add_operator('ord', 1, 'ord') +add_operator('invert', 1, '~') +add_operator('add', 2, '+') +add_operator('sub', 2, '-') +add_operator('mul', 2, '*') +add_operator('truediv', 2, '/') +add_operator('floordiv', 2, '//') +add_operator('div', 2, 'div') +add_operator('mod', 2, '%') +add_operator('divmod', 2, 'divmod') +add_operator('pow', 3, '**') +add_operator('lshift', 2, '<<') +add_operator('rshift', 2, '>>') +add_operator('and_', 2, '&') +add_operator('or_', 2, '|') +add_operator('xor', 2, '^') +add_operator('int', 1, 'int') +add_operator('index', 1, 'index') +add_operator('float', 1, 'float') +add_operator('long', 1, 'long') +add_operator('inplace_add', 2, '+=') +add_operator('inplace_sub', 2, '-=') +add_operator('inplace_mul', 2, '*=') +add_operator('inplace_truediv', 2, '/=') +add_operator('inplace_floordiv', 2, '//=') +add_operator('inplace_div', 2, 'div=') +add_operator('inplace_mod', 2, '%=') +add_operator('inplace_pow', 2, '**=') +add_operator('inplace_lshift', 2, '<<=') +add_operator('inplace_rshift', 2, '>>=') +add_operator('inplace_and', 2, '&=') +add_operator('inplace_or', 2, '|=') +add_operator('inplace_xor', 2, '^=') +add_operator('lt', 2, '<') +add_operator('le', 2, '<=') +add_operator('eq', 2, '==') +add_operator('ne', 2, '!=') +add_operator('gt', 2, '>') +add_operator('ge', 2, '>=') +add_operator('cmp', 2, 'cmp') # rich cmps preferred +add_operator('coerce', 2, 'coerce') +add_operator('contains', 2, 'contains') +add_operator('iter', 1, 'iter') +add_operator('next', 1, 'next') +#add_operator('call', 3, 'call') +add_operator('get', 3, 'get') +add_operator('set', 3, 'set') +add_operator('delete', 2, 'delete') +add_operator('userdel', 1, 'del') +add_operator('buffer', 1, 'buffer') # see buffer.py FunctionByName = {} # dict {"operation_name": } @@ -304,8 +312,7 @@ def setup(): # insert all operators - for line in MethodTable: - name = line[0] + for name in vars(op): if hasattr(operator, name): 
Table.append((name, getattr(operator, name))) # build the dictionaries @@ -315,9 +322,8 @@ if func not in OperationName: OperationName[func] = name # check that the result is complete - for line in MethodTable: - name = line[0] - Arity[name] = line[2] + for name, oper in vars(op).iteritems(): + Arity[name] = oper.arity assert name in FunctionByName setup() del Table, setup # INTERNAL ONLY, use the dicts declared at the top of the file From noreply at buildbot.pypy.org Fri Jul 5 19:41:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:51 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Kill operation.Arity Message-ID: <20130705174151.F24E71C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65214:eecbf0a00b93 Date: 2013-05-03 16:57 +0100 http://bitbucket.org/pypy/pypy/changeset/eecbf0a00b93/ Log: Kill operation.Arity diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -321,10 +321,6 @@ FunctionByName[name] = func if func not in OperationName: OperationName[func] = name - # check that the result is complete - for name, oper in vars(op).iteritems(): - Arity[name] = oper.arity - assert name in FunctionByName setup() del Table, setup # INTERNAL ONLY, use the dicts declared at the top of the file diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,5 +1,5 @@ from rpython.flowspace.model import Constant -from rpython.flowspace.operation import OperationName, Arity +from rpython.flowspace.operation import OperationName, op from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated @@ -10,14 +10,15 @@ def sc_operator(space, fn, args_w): opname = OperationName[fn] - if len(args_w) != Arity[opname]: + oper = getattr(op, opname) + if len(args_w) != oper.arity: if opname == 'pow' and len(args_w) == 2: args_w = args_w + [Constant(None)] elif opname == 'getattr' and len(args_w) == 3: return space.frame.do_operation('simple_call', Constant(getattr), *args_w) else: raise Exception("should call %r with exactly %d arguments" % ( - fn, Arity[opname])) + fn, oper.arity)) # completely replace the call with the underlying # operation and its limited implicit exceptions semantic return getattr(space, opname)(*args_w) From noreply at buildbot.pypy.org Fri Jul 5 19:41:53 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:53 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: setup SpaceOperators a bit more explicitly Message-ID: <20130705174153.350B61C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65215:c78d0f3c3045 Date: 2013-05-03 18:54 +0100 http://bitbucket.org/pypy/pypy/changeset/c78d0f3c3045/ Log: setup SpaceOperators a bit more explicitly diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -245,7 +245,6 @@ Table = [ ('id', id), ('type', new_style_type), - ('type', type), ('isinstance', isinstance), ('issubtype', issubclass), ('repr', repr), @@ -257,9 +256,7 @@ ('setattr', setattr), ('delattr', delattr), ('nonzero', bool), - ('nonzero', operator.truth), ('is_true', bool), - ('is_true', operator.truth), ('trunc', unsupported), ('abs' , abs), ('hex', hex), @@ -307,22 +304,31 @@ ('mod_ovf', mod_ovf), ('lshift_ovf', 
lshift_ovf), ] -if hasattr(__builtin__, 'next'): - Table.append(('next', __builtin__.next)) -def setup(): - # insert all operators - for name in vars(op): - if hasattr(operator, name): - Table.append((name, getattr(operator, name))) - # build the dictionaries - for name, func in Table: +# build the dictionaries +for name, func in Table: + if name not in FunctionByName: + FunctionByName[name] = func + if func not in OperationName: + OperationName[func] = name +del Table # INTERNAL ONLY, use the dicts declared at the top of the file + +# insert all operators +for name in vars(op): + if hasattr(operator, name): + func = getattr(operator, name) if name not in FunctionByName: FunctionByName[name] = func if func not in OperationName: OperationName[func] = name -setup() -del Table, setup # INTERNAL ONLY, use the dicts declared at the top of the file + +# Other functions that get directly translated to SpaceOperators +func2op = {type: op.type, operator.truth: op.nonzero} +if hasattr(__builtin__, 'next'): + func2op[__builtin__.next] = op.next +for func, oper in func2op.iteritems(): + OperationName[func] = oper.name + op_appendices = { OverflowError: 'ovf', From noreply at buildbot.pypy.org Fri Jul 5 19:41:54 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:41:54 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Kill OperationName (use a mapping of functions to operators instead) Message-ID: <20130705174154.5B3951C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65216:95ee9391ba6f Date: 2013-05-03 19:35 +0100 http://bitbucket.org/pypy/pypy/changeset/95ee9391ba6f/ Log: Kill OperationName (use a mapping of functions to operators instead) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -99,10 +99,13 @@ add_operator('userdel', 1, 'del') add_operator('buffer', 1, 'buffer') # see buffer.py +# Add _ovf ops +for oper in [op.neg, op.abs, op.add, op.sub, op.mul, op.floordiv, op.div, + op.mod, op.lshift]: + add_operator(oper.name + '_ovf', oper.arity, oper.symbol) + FunctionByName = {} # dict {"operation_name": } -OperationName = {} # dict {: "operation_name"} -Arity = {} # dict {"operation name": number of arguments} # ____________________________________________________________ @@ -306,11 +309,12 @@ ] # build the dictionaries +func2op = {} for name, func in Table: if name not in FunctionByName: FunctionByName[name] = func - if func not in OperationName: - OperationName[func] = name + if func not in func2op: + func2op[func] = getattr(op, name) del Table # INTERNAL ONLY, use the dicts declared at the top of the file # insert all operators @@ -319,15 +323,14 @@ func = getattr(operator, name) if name not in FunctionByName: FunctionByName[name] = func - if func not in OperationName: - OperationName[func] = name + if func not in func2op: + func2op[func] = getattr(op, name) # Other functions that get directly translated to SpaceOperators -func2op = {type: op.type, operator.truth: op.nonzero} +func2op[type] = op.type +func2op[operator.truth] = op.nonzero if hasattr(__builtin__, 'next'): func2op[__builtin__.next] = op.next -for func, oper in func2op.iteritems(): - OperationName[func] = oper.name op_appendices = { diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,5 +1,5 @@ from rpython.flowspace.model import Constant -from 
rpython.flowspace.operation import OperationName, op +from rpython.flowspace.operation import func2op, op from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated @@ -9,19 +9,18 @@ return space.import_name(*args) def sc_operator(space, fn, args_w): - opname = OperationName[fn] - oper = getattr(op, opname) + oper = func2op[fn] if len(args_w) != oper.arity: - if opname == 'pow' and len(args_w) == 2: + if oper is op.pow and len(args_w) == 2: args_w = args_w + [Constant(None)] - elif opname == 'getattr' and len(args_w) == 3: + elif oper is op.getattr and len(args_w) == 3: return space.frame.do_operation('simple_call', Constant(getattr), *args_w) else: raise Exception("should call %r with exactly %d arguments" % ( fn, oper.arity)) # completely replace the call with the underlying # operation and its limited implicit exceptions semantic - return getattr(space, opname)(*args_w) + return getattr(space, oper.name)(*args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs @@ -72,5 +71,5 @@ SPECIAL_CASES = {__import__: sc_import, r_uint: sc_r_uint, we_are_translated: sc_we_are_translated, locals: sc_locals} -for fn in OperationName: +for fn in func2op: SPECIAL_CASES[fn] = sc_operator From noreply at buildbot.pypy.org Fri Jul 5 19:42:16 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:16 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Kill operation.Table Message-ID: <20130705174216.71A021C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65217:8f1f99428ec1 Date: 2013-05-03 22:19 +0100 http://bitbucket.org/pypy/pypy/changeset/8f1f99428ec1/ Log: Kill operation.Table diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -12,100 +12,28 @@ class _OpHolder(object): pass op = _OpHolder() +func2op = {} +FunctionByName = {} # dict {"operation_name": } + class SpaceOperator(object): - def __init__(self, name, arity, symbol): + def __init__(self, name, arity, symbol, pyfunc): self.name = name self.arity = arity self.symbol = symbol + self.pyfunc = pyfunc -def add_operator(name, arity, symbol): - operator = SpaceOperator(name, arity, symbol) - setattr(op, name, operator) - -add_operator('is_', 2, 'is') -add_operator('id', 1, 'id') -add_operator('type', 1, 'type') -add_operator('isinstance', 2, 'isinstance') -add_operator('issubtype', 2, 'issubtype') # not for old-style classes -add_operator('repr', 1, 'repr') -add_operator('str', 1, 'str') -add_operator('format', 2, 'format') -add_operator('len', 1, 'len') -add_operator('hash', 1, 'hash') -add_operator('getattr', 2, 'getattr') -add_operator('setattr', 3, 'setattr') -add_operator('delattr', 2, 'delattr') -add_operator('getitem', 2, 'getitem') -add_operator('setitem', 3, 'setitem') -add_operator('delitem', 2, 'delitem') -add_operator('getslice', 3, 'getslice') -add_operator('setslice', 4, 'setslice') -add_operator('delslice', 3, 'delslice') -add_operator('trunc', 1, 'trunc') -add_operator('pos', 1, 'pos') -add_operator('neg', 1, 'neg') -add_operator('nonzero', 1, 'truth') -add_operator('abs' , 1, 'abs') -add_operator('hex', 1, 'hex') -add_operator('oct', 1, 'oct') -add_operator('ord', 1, 'ord') -add_operator('invert', 1, '~') -add_operator('add', 2, '+') -add_operator('sub', 2, '-') -add_operator('mul', 2, '*') -add_operator('truediv', 2, '/') 
-add_operator('floordiv', 2, '//') -add_operator('div', 2, 'div') -add_operator('mod', 2, '%') -add_operator('divmod', 2, 'divmod') -add_operator('pow', 3, '**') -add_operator('lshift', 2, '<<') -add_operator('rshift', 2, '>>') -add_operator('and_', 2, '&') -add_operator('or_', 2, '|') -add_operator('xor', 2, '^') -add_operator('int', 1, 'int') -add_operator('index', 1, 'index') -add_operator('float', 1, 'float') -add_operator('long', 1, 'long') -add_operator('inplace_add', 2, '+=') -add_operator('inplace_sub', 2, '-=') -add_operator('inplace_mul', 2, '*=') -add_operator('inplace_truediv', 2, '/=') -add_operator('inplace_floordiv', 2, '//=') -add_operator('inplace_div', 2, 'div=') -add_operator('inplace_mod', 2, '%=') -add_operator('inplace_pow', 2, '**=') -add_operator('inplace_lshift', 2, '<<=') -add_operator('inplace_rshift', 2, '>>=') -add_operator('inplace_and', 2, '&=') -add_operator('inplace_or', 2, '|=') -add_operator('inplace_xor', 2, '^=') -add_operator('lt', 2, '<') -add_operator('le', 2, '<=') -add_operator('eq', 2, '==') -add_operator('ne', 2, '!=') -add_operator('gt', 2, '>') -add_operator('ge', 2, '>=') -add_operator('cmp', 2, 'cmp') # rich cmps preferred -add_operator('coerce', 2, 'coerce') -add_operator('contains', 2, 'contains') -add_operator('iter', 1, 'iter') -add_operator('next', 1, 'next') -#add_operator('call', 3, 'call') -add_operator('get', 3, 'get') -add_operator('set', 3, 'set') -add_operator('delete', 2, 'delete') -add_operator('userdel', 1, 'del') -add_operator('buffer', 1, 'buffer') # see buffer.py - -# Add _ovf ops -for oper in [op.neg, op.abs, op.add, op.sub, op.mul, op.floordiv, op.div, - op.mod, op.lshift]: - add_operator(oper.name + '_ovf', oper.arity, oper.symbol) - - -FunctionByName = {} # dict {"operation_name": } +def add_operator(name, arity, symbol, pyfunc=None): + operator_func = getattr(operator, name, None) + oper = SpaceOperator(name, arity, symbol, pyfunc) + setattr(op, name, oper) + if pyfunc is not None: + FunctionByName[name] = pyfunc + func2op[pyfunc] = oper + if operator_func: + func2op[operator_func] = oper + if pyfunc is None: + oper.pyfunc = operator_func + FunctionByName[name] = operator_func # ____________________________________________________________ @@ -237,94 +165,91 @@ def unsupported(*args): raise ValueError("this is not supported") -# ____________________________________________________________ -# The following table can list several times the same operation name, -# if multiple built-in functions correspond to it. The first one should -# be picked, though, as the best built-in for the given operation name. -# Lines ('name', operator.name) are added automatically. 
+add_operator('is_', 2, 'is') +add_operator('id', 1, 'id', pyfunc=id) +add_operator('type', 1, 'type', pyfunc=new_style_type) +add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance) +add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass) # not for old-style classes +add_operator('repr', 1, 'repr', pyfunc=repr) +add_operator('str', 1, 'str', pyfunc=str) +add_operator('format', 2, 'format', pyfunc=unsupported) +add_operator('len', 1, 'len', pyfunc=len) +add_operator('hash', 1, 'hash', pyfunc=hash) +add_operator('getattr', 2, 'getattr', pyfunc=getattr) +add_operator('setattr', 3, 'setattr', pyfunc=setattr) +add_operator('delattr', 2, 'delattr', pyfunc=delattr) +add_operator('getitem', 2, 'getitem') +add_operator('setitem', 3, 'setitem') +add_operator('delitem', 2, 'delitem') +add_operator('getslice', 3, 'getslice', pyfunc=do_getslice) +add_operator('setslice', 4, 'setslice', pyfunc=do_setslice) +add_operator('delslice', 3, 'delslice', pyfunc=do_delslice) +add_operator('trunc', 1, 'trunc', pyfunc=unsupported) +add_operator('pos', 1, 'pos') +add_operator('neg', 1, 'neg') +add_operator('nonzero', 1, 'truth', pyfunc=bool) +add_operator('abs' , 1, 'abs', pyfunc=abs) +add_operator('hex', 1, 'hex', pyfunc=hex) +add_operator('oct', 1, 'oct', pyfunc=oct) +add_operator('ord', 1, 'ord', pyfunc=ord) +add_operator('invert', 1, '~') +add_operator('add', 2, '+') +add_operator('sub', 2, '-') +add_operator('mul', 2, '*') +add_operator('truediv', 2, '/') +add_operator('floordiv', 2, '//') +add_operator('div', 2, 'div') +add_operator('mod', 2, '%') +add_operator('divmod', 2, 'divmod', pyfunc=divmod) +add_operator('pow', 3, '**', pyfunc=pow) +add_operator('lshift', 2, '<<') +add_operator('rshift', 2, '>>') +add_operator('and_', 2, '&') +add_operator('or_', 2, '|') +add_operator('xor', 2, '^') +add_operator('int', 1, 'int', pyfunc=do_int) +add_operator('index', 1, 'index', pyfunc=do_index) +add_operator('float', 1, 'float', pyfunc=do_float) +add_operator('long', 1, 'long', pyfunc=do_long) +add_operator('inplace_add', 2, '+=', pyfunc=inplace_add) +add_operator('inplace_sub', 2, '-=', pyfunc=inplace_sub) +add_operator('inplace_mul', 2, '*=', pyfunc=inplace_mul) +add_operator('inplace_truediv', 2, '/=', pyfunc=inplace_truediv) +add_operator('inplace_floordiv', 2, '//=', pyfunc=inplace_floordiv) +add_operator('inplace_div', 2, 'div=', pyfunc=inplace_div) +add_operator('inplace_mod', 2, '%=', pyfunc=inplace_mod) +add_operator('inplace_pow', 2, '**=', pyfunc=inplace_pow) +add_operator('inplace_lshift', 2, '<<=', pyfunc=inplace_lshift) +add_operator('inplace_rshift', 2, '>>=', pyfunc=inplace_rshift) +add_operator('inplace_and', 2, '&=', pyfunc=inplace_and) +add_operator('inplace_or', 2, '|=', pyfunc=inplace_or) +add_operator('inplace_xor', 2, '^=', pyfunc=inplace_xor) +add_operator('lt', 2, '<') +add_operator('le', 2, '<=') +add_operator('eq', 2, '==') +add_operator('ne', 2, '!=') +add_operator('gt', 2, '>') +add_operator('ge', 2, '>=') +add_operator('cmp', 2, 'cmp', pyfunc=cmp) # rich cmps preferred +add_operator('coerce', 2, 'coerce', pyfunc=coerce) +add_operator('contains', 2, 'contains') +add_operator('iter', 1, 'iter', pyfunc=iter) +add_operator('next', 1, 'next', pyfunc=next) +#add_operator('call', 3, 'call') +add_operator('get', 3, 'get', pyfunc=get) +add_operator('set', 3, 'set', pyfunc=set) +add_operator('delete', 2, 'delete', pyfunc=delete) +add_operator('userdel', 1, 'del', pyfunc=userdel) +add_operator('buffer', 1, 'buffer', pyfunc=buffer) # see buffer.py -# INTERNAL ONLY, use the dicts 
declared at the top of the file. -Table = [ - ('id', id), - ('type', new_style_type), - ('isinstance', isinstance), - ('issubtype', issubclass), - ('repr', repr), - ('str', str), - ('format', unsupported), - ('len', len), - ('hash', hash), - ('getattr', getattr), - ('setattr', setattr), - ('delattr', delattr), - ('nonzero', bool), - ('is_true', bool), - ('trunc', unsupported), - ('abs' , abs), - ('hex', hex), - ('oct', oct), - ('ord', ord), - ('divmod', divmod), - ('pow', pow), - ('int', do_int), - ('index', do_index), - ('float', do_float), - ('long', do_long), - ('inplace_add', inplace_add), - ('inplace_sub', inplace_sub), - ('inplace_mul', inplace_mul), - ('inplace_truediv', inplace_truediv), - ('inplace_floordiv',inplace_floordiv), - ('inplace_div', inplace_div), - ('inplace_mod', inplace_mod), - ('inplace_pow', inplace_pow), - ('inplace_lshift', inplace_lshift), - ('inplace_rshift', inplace_rshift), - ('inplace_and', inplace_and), - ('inplace_or', inplace_or), - ('inplace_xor', inplace_xor), - ('cmp', cmp), - ('coerce', coerce), - ('iter', iter), - ('next', next), - ('get', get), - ('set', set), - ('delete', delete), - ('userdel', userdel), - ('buffer', buffer), - ('getslice', do_getslice), - ('setslice', do_setslice), - ('delslice', do_delslice), - # --- operations added by graph transformations --- - ('neg_ovf', neg_ovf), - ('abs_ovf', abs_ovf), - ('add_ovf', add_ovf), - ('sub_ovf', sub_ovf), - ('mul_ovf', mul_ovf), - ('floordiv_ovf', floordiv_ovf), - ('div_ovf', div_ovf), - ('mod_ovf', mod_ovf), - ('lshift_ovf', lshift_ovf), -] +# --- operations added by graph transformations --- +for oper in [op.neg, op.abs, op.add, op.sub, op.mul, op.floordiv, op.div, + op.mod, op.lshift]: + ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) + add_operator(oper.name + '_ovf', oper.arity, oper.symbol, pyfunc=ovf_func) -# build the dictionaries -func2op = {} -for name, func in Table: - if name not in FunctionByName: - FunctionByName[name] = func - if func not in func2op: - func2op[func] = getattr(op, name) -del Table # INTERNAL ONLY, use the dicts declared at the top of the file - -# insert all operators -for name in vars(op): - if hasattr(operator, name): - func = getattr(operator, name) - if name not in FunctionByName: - FunctionByName[name] = func - if func not in func2op: - func2op[func] = getattr(op, name) +FunctionByName['is_true'] = bool # Other functions that get directly translated to SpaceOperators func2op[type] = op.type From noreply at buildbot.pypy.org Fri Jul 5 19:42:17 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:17 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Kill operation.FunctionByName Message-ID: <20130705174217.9DB7E1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65218:560b115996b4 Date: 2013-05-05 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/560b115996b4/ Log: Kill operation.FunctionByName diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -412,44 +412,46 @@ raise FlowingError(self.frame, self.wrap(message)) return self.wrap(value) -def make_impure_op(name, arity): +def make_impure_op(oper): def generic_operator(self, *args_w): - assert len(args_w) == arity, name + " got the wrong number of arguments" - w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) + if len(args_w) != oper.arity: + raise TypeError(oper.name + " got the wrong number of arguments") + w_result = 
self.frame.do_operation_with_implicit_exceptions(oper.name, *args_w) return w_result return generic_operator -def make_op(name, arity): +def make_op(oper): """Add function operation to the flow space.""" op = None skip = False arithmetic = False + name = oper.name if (name.startswith('del') or name.startswith('set') or name.startswith('inplace_')): - return make_impure_op(name, arity) + return make_impure_op(oper) elif name in ('id', 'hash', 'iter', 'userdel'): - return make_impure_op(name, arity) + return make_impure_op(oper) elif name in ('repr', 'str'): rep = getattr(__builtin__, name) - def op(obj): + def func(obj): s = rep(obj) if "at 0x" in s: print >>sys.stderr, "Warning: captured address may be awkward" return s else: - op = operation.FunctionByName[name] - arithmetic = (name + '_ovf') in operation.FunctionByName + func = oper.pyfunc + arithmetic = hasattr(operation.op, name + '_ovf') def generic_operator(self, *args_w): - assert len(args_w) == arity, name + " got the wrong number of arguments" + assert len(args_w) == oper.arity, name + " got the wrong number of arguments" args = [] if all(w_arg.foldable() for w_arg in args_w): args = [w_arg.value for w_arg in args_w] # All arguments are constants: call the operator now try: - result = op(*args) + result = func(*args) except Exception, e: etype = e.__class__ msg = "%s%r always raises %s: %s" % ( @@ -479,7 +481,7 @@ for oper in operation.op.__dict__.values(): if getattr(FlowObjSpace, oper.name, None) is None: - setattr(FlowObjSpace, oper.name, make_op(oper.name, oper.arity)) + setattr(FlowObjSpace, oper.name, make_op(oper)) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -13,7 +13,6 @@ op = _OpHolder() func2op = {} -FunctionByName = {} # dict {"operation_name": } class SpaceOperator(object): def __init__(self, name, arity, symbol, pyfunc): @@ -27,13 +26,11 @@ oper = SpaceOperator(name, arity, symbol, pyfunc) setattr(op, name, oper) if pyfunc is not None: - FunctionByName[name] = pyfunc func2op[pyfunc] = oper if operator_func: func2op[operator_func] = oper if pyfunc is None: oper.pyfunc = operator_func - FunctionByName[name] = operator_func # ____________________________________________________________ @@ -189,6 +186,7 @@ add_operator('pos', 1, 'pos') add_operator('neg', 1, 'neg') add_operator('nonzero', 1, 'truth', pyfunc=bool) +op.is_true = op.nonzero add_operator('abs' , 1, 'abs', pyfunc=abs) add_operator('hex', 1, 'hex', pyfunc=hex) add_operator('oct', 1, 'oct', pyfunc=oct) @@ -249,8 +247,6 @@ ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) add_operator(oper.name + '_ovf', oper.arity, oper.symbol, pyfunc=ovf_func) -FunctionByName['is_true'] = bool - # Other functions that get directly translated to SpaceOperators func2op[type] = op.type func2op[operator.truth] = op.nonzero diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -1,4 +1,4 @@ -from rpython.flowspace.operation import FunctionByName +from rpython.flowspace.operation import op from rpython.rlib import debug from rpython.rlib.rarithmetic import is_valid_int from rpython.rtyper.lltypesystem import lltype, llmemory @@ -13,7 +13,6 @@ 'lt': True, 'le': True, 'eq': True, 'ne': True, 'is_true': True} -ops_unary = {'is_true': True, 'neg': True, 'abs': True, 'invert': True} # global synonyms for some 
types from rpython.rlib.rarithmetic import intmask @@ -46,11 +45,13 @@ def get_primitive_op_src(fullopname): assert '_' in fullopname, "%s: not a primitive op" % (fullopname,) typname, opname = fullopname.split('_', 1) - if opname not in FunctionByName and (opname + '_') in FunctionByName: - func = FunctionByName[opname + '_'] # or_, and_ + if hasattr(op, opname): + oper = getattr(op, opname) + elif hasattr(op, opname + '_'): + oper = getattr(op, opname + '_') # or_, and_ else: - assert opname in FunctionByName, "%s: not a primitive op" % (fullopname,) - func = FunctionByName[opname] + raise ValueError("%s: not a primitive op" % (fullopname,)) + func = oper.pyfunc if typname == 'char': # char_lt, char_eq, ... @@ -72,7 +73,7 @@ fullopname,) argtype = argtype_by_name[typname] - if opname in ops_unary: + if oper.arity == 1: def op_function(x): if not isinstance(x, argtype): raise TypeError("%r arg must be %s, got %r instead" % ( From noreply at buildbot.pypy.org Fri Jul 5 19:42:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:18 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: remove obsolete special-casing for string exceptions Message-ID: <20130705174218.D764E1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65219:04633b3148b7 Date: 2013-05-05 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/04633b3148b7/ Log: remove obsolete special-casing for string exceptions diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -167,21 +167,6 @@ def exception_issubclass_w(self, w_cls1, w_cls2): return self.is_true(self.issubtype(w_cls1, w_cls2)) - def _exception_match(self, w_exc_type, w_check_class): - """Helper for exception_match - - Handles the base case where w_check_class is a constant exception - type. 
- """ - if self.is_w(w_exc_type, w_check_class): - return True # fast path (also here to handle string exceptions) - try: - return self.exception_issubclass_w(w_exc_type, w_check_class) - except FSException, e: - if e.match(self, self.w_TypeError): # string exceptions maybe - return False - raise - def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" try: @@ -193,11 +178,11 @@ "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case - return self._exception_match(w_exc_type, w_check_class) + return self.exception_issubclass_w(w_exc_type, w_check_class) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: w_real_class = self.wrap(rstackovf._StackOverflow) - return self._exception_match(w_exc_type, w_real_class) + return self.exception_issubclass_w(w_exc_type, w_real_class) # checking a tuple of classes for w_klass in self.unpackiterable(w_check_class): if self.exception_match(w_exc_type, w_klass): From noreply at buildbot.pypy.org Fri Jul 5 19:42:20 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:20 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: cleanup make_op() Message-ID: <20130705174220.164211C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65220:300866452771 Date: 2013-05-06 01:00 +0100 http://bitbucket.org/pypy/pypy/changeset/300866452771/ Log: cleanup make_op() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -407,8 +407,6 @@ def make_op(oper): """Add function operation to the flow space.""" - op = None - skip = False arithmetic = False name = oper.name From noreply at buildbot.pypy.org Fri Jul 5 19:42:21 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:21 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: kill annoying special case for repr and str in make_op() Message-ID: <20130705174221.4F0441C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65221:8507c35cab3e Date: 2013-05-07 13:50 +0100 http://bitbucket.org/pypy/pypy/changeset/8507c35cab3e/ Log: kill annoying special case for repr and str in make_op() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -416,13 +416,6 @@ return make_impure_op(oper) elif name in ('id', 'hash', 'iter', 'userdel'): return make_impure_op(oper) - elif name in ('repr', 'str'): - rep = getattr(__builtin__, name) - def func(obj): - s = rep(obj) - if "at 0x" in s: - print >>sys.stderr, "Warning: captured address may be awkward" - return s else: func = oper.pyfunc arithmetic = hasattr(operation.op, name + '_ovf') From noreply at buildbot.pypy.org Fri Jul 5 19:42:22 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:22 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Add 'pure' attribute to SpaceOperator and simplify make_op() Message-ID: <20130705174222.866AE1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65222:162201fa82fc Date: 2013-05-18 22:09 +0100 http://bitbucket.org/pypy/pypy/changeset/162201fa82fc/ Log: Add 'pure' attribute to SpaceOperator and simplify make_op() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- 
a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -407,18 +407,9 @@ def make_op(oper): """Add function operation to the flow space.""" - arithmetic = False name = oper.name - - if (name.startswith('del') or - name.startswith('set') or - name.startswith('inplace_')): - return make_impure_op(oper) - elif name in ('id', 'hash', 'iter', 'userdel'): - return make_impure_op(oper) - else: - func = oper.pyfunc - arithmetic = hasattr(operation.op, name + '_ovf') + func = oper.pyfunc + arithmetic = hasattr(operation.op, name + '_ovf') def generic_operator(self, *args_w): assert len(args_w) == oper.arity, name + " got the wrong number of arguments" @@ -457,7 +448,11 @@ for oper in operation.op.__dict__.values(): if getattr(FlowObjSpace, oper.name, None) is None: - setattr(FlowObjSpace, oper.name, make_op(oper)) + if oper.pure: + op_method = make_op(oper) + else: + op_method = make_impure_op(oper) + setattr(FlowObjSpace, oper.name, op_method) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -15,15 +15,16 @@ func2op = {} class SpaceOperator(object): - def __init__(self, name, arity, symbol, pyfunc): + def __init__(self, name, arity, symbol, pyfunc, pure=False): self.name = name self.arity = arity self.symbol = symbol self.pyfunc = pyfunc + self.pure = pure -def add_operator(name, arity, symbol, pyfunc=None): +def add_operator(name, arity, symbol, pyfunc=None, pure=False): operator_func = getattr(operator, name, None) - oper = SpaceOperator(name, arity, symbol, pyfunc) + oper = SpaceOperator(name, arity, symbol, pyfunc, pure) setattr(op, name, oper) if pyfunc is not None: func2op[pyfunc] = oper @@ -163,53 +164,53 @@ raise ValueError("this is not supported") -add_operator('is_', 2, 'is') +add_operator('is_', 2, 'is', pure=True) add_operator('id', 1, 'id', pyfunc=id) -add_operator('type', 1, 'type', pyfunc=new_style_type) -add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance) -add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass) # not for old-style classes -add_operator('repr', 1, 'repr', pyfunc=repr) -add_operator('str', 1, 'str', pyfunc=str) +add_operator('type', 1, 'type', pyfunc=new_style_type, pure=True) +add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance, pure=True) +add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('repr', 1, 'repr', pyfunc=repr, pure=True) +add_operator('str', 1, 'str', pyfunc=str, pure=True) add_operator('format', 2, 'format', pyfunc=unsupported) -add_operator('len', 1, 'len', pyfunc=len) +add_operator('len', 1, 'len', pyfunc=len, pure=True) add_operator('hash', 1, 'hash', pyfunc=hash) -add_operator('getattr', 2, 'getattr', pyfunc=getattr) +add_operator('getattr', 2, 'getattr', pyfunc=getattr, pure=True) add_operator('setattr', 3, 'setattr', pyfunc=setattr) add_operator('delattr', 2, 'delattr', pyfunc=delattr) -add_operator('getitem', 2, 'getitem') +add_operator('getitem', 2, 'getitem', pure=True) add_operator('setitem', 3, 'setitem') add_operator('delitem', 2, 'delitem') -add_operator('getslice', 3, 'getslice', pyfunc=do_getslice) +add_operator('getslice', 3, 'getslice', pyfunc=do_getslice, pure=True) add_operator('setslice', 4, 'setslice', pyfunc=do_setslice) add_operator('delslice', 3, 'delslice', pyfunc=do_delslice) add_operator('trunc', 1, 'trunc', pyfunc=unsupported) -add_operator('pos', 1, 'pos') 
-add_operator('neg', 1, 'neg') -add_operator('nonzero', 1, 'truth', pyfunc=bool) +add_operator('pos', 1, 'pos', pure=True) +add_operator('neg', 1, 'neg', pure=True) +add_operator('nonzero', 1, 'truth', pyfunc=bool, pure=True) op.is_true = op.nonzero -add_operator('abs' , 1, 'abs', pyfunc=abs) -add_operator('hex', 1, 'hex', pyfunc=hex) -add_operator('oct', 1, 'oct', pyfunc=oct) -add_operator('ord', 1, 'ord', pyfunc=ord) -add_operator('invert', 1, '~') -add_operator('add', 2, '+') -add_operator('sub', 2, '-') -add_operator('mul', 2, '*') -add_operator('truediv', 2, '/') -add_operator('floordiv', 2, '//') -add_operator('div', 2, 'div') -add_operator('mod', 2, '%') -add_operator('divmod', 2, 'divmod', pyfunc=divmod) -add_operator('pow', 3, '**', pyfunc=pow) -add_operator('lshift', 2, '<<') -add_operator('rshift', 2, '>>') -add_operator('and_', 2, '&') -add_operator('or_', 2, '|') -add_operator('xor', 2, '^') -add_operator('int', 1, 'int', pyfunc=do_int) -add_operator('index', 1, 'index', pyfunc=do_index) -add_operator('float', 1, 'float', pyfunc=do_float) -add_operator('long', 1, 'long', pyfunc=do_long) +add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True) +add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) +add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) +add_operator('ord', 1, 'ord', pyfunc=ord, pure=True) +add_operator('invert', 1, '~', pure=True) +add_operator('add', 2, '+', pure=True) +add_operator('sub', 2, '-', pure=True) +add_operator('mul', 2, '*', pure=True) +add_operator('truediv', 2, '/', pure=True) +add_operator('floordiv', 2, '//', pure=True) +add_operator('div', 2, 'div', pure=True) +add_operator('mod', 2, '%', pure=True) +add_operator('divmod', 2, 'divmod', pyfunc=divmod, pure=True) +add_operator('pow', 3, '**', pyfunc=pow, pure=True) +add_operator('lshift', 2, '<<', pure=True) +add_operator('rshift', 2, '>>', pure=True) +add_operator('and_', 2, '&', pure=True) +add_operator('or_', 2, '|', pure=True) +add_operator('xor', 2, '^', pure=True) +add_operator('int', 1, 'int', pyfunc=do_int, pure=True) +add_operator('index', 1, 'index', pyfunc=do_index, pure=True) +add_operator('float', 1, 'float', pyfunc=do_float, pure=True) +add_operator('long', 1, 'long', pyfunc=do_long, pure=True) add_operator('inplace_add', 2, '+=', pyfunc=inplace_add) add_operator('inplace_sub', 2, '-=', pyfunc=inplace_sub) add_operator('inplace_mul', 2, '*=', pyfunc=inplace_mul) @@ -223,23 +224,23 @@ add_operator('inplace_and', 2, '&=', pyfunc=inplace_and) add_operator('inplace_or', 2, '|=', pyfunc=inplace_or) add_operator('inplace_xor', 2, '^=', pyfunc=inplace_xor) -add_operator('lt', 2, '<') -add_operator('le', 2, '<=') -add_operator('eq', 2, '==') -add_operator('ne', 2, '!=') -add_operator('gt', 2, '>') -add_operator('ge', 2, '>=') -add_operator('cmp', 2, 'cmp', pyfunc=cmp) # rich cmps preferred -add_operator('coerce', 2, 'coerce', pyfunc=coerce) -add_operator('contains', 2, 'contains') +add_operator('lt', 2, '<', pure=True) +add_operator('le', 2, '<=', pure=True) +add_operator('eq', 2, '==', pure=True) +add_operator('ne', 2, '!=', pure=True) +add_operator('gt', 2, '>', pure=True) +add_operator('ge', 2, '>=', pure=True) +add_operator('cmp', 2, 'cmp', pyfunc=cmp, pure=True) # rich cmps preferred +add_operator('coerce', 2, 'coerce', pyfunc=coerce, pure=True) +add_operator('contains', 2, 'contains', pure=True) add_operator('iter', 1, 'iter', pyfunc=iter) add_operator('next', 1, 'next', pyfunc=next) #add_operator('call', 3, 'call') -add_operator('get', 3, 'get', pyfunc=get) +add_operator('get', 3, 'get', 
pyfunc=get, pure=True) add_operator('set', 3, 'set', pyfunc=set) add_operator('delete', 2, 'delete', pyfunc=delete) add_operator('userdel', 1, 'del', pyfunc=userdel) -add_operator('buffer', 1, 'buffer', pyfunc=buffer) # see buffer.py +add_operator('buffer', 1, 'buffer', pyfunc=buffer, pure=True) # see buffer.py # --- operations added by graph transformations --- for oper in [op.neg, op.abs, op.add, op.sub, op.mul, op.floordiv, op.div, diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1169,6 +1169,14 @@ 'iter': 1, 'newlist': 1, 'next': 1, 'simple_call': 1} + def test_mutate_const_list(self): + lst = list('abcdef') + def f(): + lst[0] = 'x' + return lst + graph = self.codetest(f) + assert 'setitem' in self.all_operations(graph) + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Fri Jul 5 19:42:23 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:23 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: deal with xxx_ovf operators more automatically Message-ID: <20130705174223.B7F501C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65223:6b1da459ff3a Date: 2013-05-12 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/6b1da459ff3a/ Log: deal with xxx_ovf operators more automatically diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -409,7 +409,6 @@ """Add function operation to the flow space.""" name = oper.name func = oper.pyfunc - arithmetic = hasattr(operation.op, name + '_ovf') def generic_operator(self, *args_w): assert len(args_w) == oper.arity, name + " got the wrong number of arguments" @@ -429,7 +428,7 @@ # result. The result is probably meant to be sent to # an intmask(), but the 'long' constant confuses the # annotator a lot. 
- if arithmetic and type(result) is long: + if oper.can_overflow and type(result) is long: pass # don't constant-fold getslice on lists, either elif name == 'getslice' and type(result) is list: diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -15,16 +15,18 @@ func2op = {} class SpaceOperator(object): - def __init__(self, name, arity, symbol, pyfunc, pure=False): + def __init__(self, name, arity, symbol, pyfunc, pure=False, + can_overflow=False): self.name = name self.arity = arity self.symbol = symbol self.pyfunc = pyfunc self.pure = pure + self.can_overflow = can_overflow -def add_operator(name, arity, symbol, pyfunc=None, pure=False): +def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) - oper = SpaceOperator(name, arity, symbol, pyfunc, pure) + oper = SpaceOperator(name, arity, symbol, pyfunc, pure, can_overflow=ovf) setattr(op, name, oper) if pyfunc is not None: func2op[pyfunc] = oper @@ -32,6 +34,9 @@ func2op[operator_func] = oper if pyfunc is None: oper.pyfunc = operator_func + if ovf: + ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) + add_operator(name + '_ovf', arity, symbol, pyfunc=ovf_func) # ____________________________________________________________ @@ -123,33 +128,6 @@ def userdel(x): x.__del__() -def neg_ovf(x): - return ovfcheck(-x) - -def abs_ovf(x): - return ovfcheck(abs(x)) - -def add_ovf(x, y): - return ovfcheck(x + y) - -def sub_ovf(x, y): - return ovfcheck(x - y) - -def mul_ovf(x, y): - return ovfcheck(x * y) - -def floordiv_ovf(x, y): - return ovfcheck(operator.floordiv(x, y)) - -def div_ovf(x, y): - return ovfcheck(operator.div(x, y)) - -def mod_ovf(x, y): - return ovfcheck(x % y) - -def lshift_ovf(x, y): - return ovfcheck(x << y) - # slicing: operator.{get,set,del}slice() don't support b=None or c=None def do_getslice(a, b, c): return a[b:c] @@ -185,24 +163,24 @@ add_operator('delslice', 3, 'delslice', pyfunc=do_delslice) add_operator('trunc', 1, 'trunc', pyfunc=unsupported) add_operator('pos', 1, 'pos', pure=True) -add_operator('neg', 1, 'neg', pure=True) +add_operator('neg', 1, 'neg', pure=True, ovf=True) add_operator('nonzero', 1, 'truth', pyfunc=bool, pure=True) op.is_true = op.nonzero -add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True) +add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True, ovf=True) add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) add_operator('ord', 1, 'ord', pyfunc=ord, pure=True) add_operator('invert', 1, '~', pure=True) -add_operator('add', 2, '+', pure=True) -add_operator('sub', 2, '-', pure=True) -add_operator('mul', 2, '*', pure=True) +add_operator('add', 2, '+', pure=True, ovf=True) +add_operator('sub', 2, '-', pure=True, ovf=True) +add_operator('mul', 2, '*', pure=True, ovf=True) add_operator('truediv', 2, '/', pure=True) -add_operator('floordiv', 2, '//', pure=True) -add_operator('div', 2, 'div', pure=True) -add_operator('mod', 2, '%', pure=True) +add_operator('floordiv', 2, '//', pure=True, ovf=True) +add_operator('div', 2, 'div', pure=True, ovf=True) +add_operator('mod', 2, '%', pure=True, ovf=True) add_operator('divmod', 2, 'divmod', pyfunc=divmod, pure=True) add_operator('pow', 3, '**', pyfunc=pow, pure=True) -add_operator('lshift', 2, '<<', pure=True) +add_operator('lshift', 2, '<<', pure=True, ovf=True) add_operator('rshift', 2, '>>', pure=True) add_operator('and_', 2, '&', pure=True) 
add_operator('or_', 2, '|', pure=True) @@ -242,12 +220,6 @@ add_operator('userdel', 1, 'del', pyfunc=userdel) add_operator('buffer', 1, 'buffer', pyfunc=buffer, pure=True) # see buffer.py -# --- operations added by graph transformations --- -for oper in [op.neg, op.abs, op.add, op.sub, op.mul, op.floordiv, op.div, - op.mod, op.lshift]: - ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) - add_operator(oper.name + '_ovf', oper.arity, oper.symbol, pyfunc=ovf_func) - # Other functions that get directly translated to SpaceOperators func2op[type] = op.type func2op[operator.truth] = op.nonzero From noreply at buildbot.pypy.org Fri Jul 5 19:42:24 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:24 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: kill 'simplifying' translation option Message-ID: <20130705174224.E07C61C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65224:49ca349da077 Date: 2013-05-13 02:49 +0100 http://bitbucket.org/pypy/pypy/changeset/49ca349da077/ Log: kill 'simplifying' translation option diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -170,7 +170,6 @@ cmdline="--make-jobs", default=detect_number_of_processors()), # Flags of the TranslationContext: - BoolOption("simplifying", "Simplify flow graphs", default=True), BoolOption("list_comprehension_operations", "When true, look for and special-case the sequence of " "operations that results from a list comprehension and " diff --git a/rpython/translator/test/test_translator.py b/rpython/translator/test/test_translator.py --- a/rpython/translator/test/test_translator.py +++ b/rpython/translator/test/test_translator.py @@ -8,7 +8,7 @@ d['key'] = 'value' def test_example(): - t = TranslationContext(simplifying=True) + t = TranslationContext() t.buildflowgraph(example) # this specific example triggered a bug in simplify.py #t.view() diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -21,7 +21,6 @@ class TranslationContext(object): FLOWING_FLAGS = { 'verbose': False, - 'simplifying': True, 'list_comprehension_operations': False, # True, - not super-tested } @@ -30,8 +29,7 @@ from rpython.config.translationoption import get_combined_translation_config config = get_combined_translation_config(translating=True) # ZZZ should go away in the end - for attr in ['verbose', 'simplifying', - 'list_comprehension_operations']: + for attr in ['verbose', 'list_comprehension_operations']: if attr in flowing_flags: setattr(config.translation, attr, flowing_flags[attr]) self.config = config @@ -54,8 +52,7 @@ if self.config.translation.verbose: log.start(nice_repr_for_func(func)) graph = build_flow(func) - if self.config.translation.simplifying: - simplify.simplify_graph(graph) + simplify.simplify_graph(graph) if self.config.translation.list_comprehension_operations: simplify.detect_list_comprehension(graph) if self.config.translation.verbose: From noreply at buildbot.pypy.org Fri Jul 5 19:42:26 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:26 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Remove fn argument from 'special cases' Message-ID: <20130705174226.1756E1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65225:b54330f38c8d Date: 2013-05-19 16:42 +0200 
http://bitbucket.org/pypy/pypy/changeset/b54330f38c8d/ Log: Remove fn argument from 'special cases' diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -352,7 +352,7 @@ args_w = args.arguments_w + self.unpackiterable(args.w_stararg) else: args_w = args.arguments_w - return sc(self, fn, args_w) + return sc(self, args_w) if args.keywords or isinstance(args.w_stararg, Variable): shape, args_w = args.flatten() diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -3,24 +3,25 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -def sc_import(space, fn, args_w): +def sc_import(space, args_w): assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments' args = [space.unwrap(arg) for arg in args_w] return space.import_name(*args) -def sc_operator(space, fn, args_w): - oper = func2op[fn] - if len(args_w) != oper.arity: - if oper is op.pow and len(args_w) == 2: - args_w = args_w + [Constant(None)] - elif oper is op.getattr and len(args_w) == 3: - return space.frame.do_operation('simple_call', Constant(getattr), *args_w) - else: - raise Exception("should call %r with exactly %d arguments" % ( - fn, oper.arity)) - # completely replace the call with the underlying - # operation and its limited implicit exceptions semantic - return getattr(space, oper.name)(*args_w) +def make_sc(oper): + def sc_operator(space, args_w): + if len(args_w) != oper.arity: + if oper is op.pow and len(args_w) == 2: + args_w = args_w + [Constant(None)] + elif oper is op.getattr and len(args_w) == 3: + return space.frame.do_operation('simple_call', Constant(getattr), *args_w) + else: + raise Exception("should call %r with exactly %d arguments" % ( + oper.name, oper.arity)) + # completely replace the call with the underlying + # operation and its limited implicit exceptions semantic + return getattr(space, oper.name)(*args_w) + return sc_operator # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs @@ -47,7 +48,7 @@ # _________________________________________________________________________ -def sc_r_uint(space, r_uint, args_w): +def sc_r_uint(space, args_w): # special case to constant-fold r_uint(32-bit-constant) # (normally, the 32-bit constant is a long, and is not allowed to # show up in the flow graphs at all) @@ -56,10 +57,10 @@ return Constant(r_uint(w_value.value)) return space.frame.do_operation('simple_call', space.wrap(r_uint), w_value) -def sc_we_are_translated(space, we_are_translated, args_w): +def sc_we_are_translated(space, args_w): return Constant(True) -def sc_locals(space, locals, args): +def sc_locals(space, args): raise Exception( "A function calling locals() is not RPython. 
" "Note that if you're translating code outside the PyPy " @@ -71,5 +72,5 @@ SPECIAL_CASES = {__import__: sc_import, r_uint: sc_r_uint, we_are_translated: sc_we_are_translated, locals: sc_locals} -for fn in func2op: - SPECIAL_CASES[fn] = sc_operator +for fn, oper in func2op.items(): + SPECIAL_CASES[fn] = make_sc(oper) From noreply at buildbot.pypy.org Fri Jul 5 19:42:27 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 19:42:27 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Turn make_sc() into a method of SpaceOperator Message-ID: <20130705174227.3AC9F1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65226:697a17c9d3a1 Date: 2013-07-05 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/697a17c9d3a1/ Log: Turn make_sc() into a method of SpaceOperator diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,6 +8,7 @@ import operator from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck +from rpython.flowspace.model import Constant class _OpHolder(object): pass op = _OpHolder() @@ -24,6 +25,22 @@ self.pure = pure self.can_overflow = can_overflow + def make_sc(self): + def sc_operator(space, args_w): + if len(args_w) != self.arity: + if self is op.pow and len(args_w) == 2: + args_w = args_w + [Constant(None)] + elif self is op.getattr and len(args_w) == 3: + return space.frame.do_operation('simple_call', Constant(getattr), *args_w) + else: + raise Exception("should call %r with exactly %d arguments" % ( + self.name, self.arity)) + # completely replace the call with the underlying + # operation and its limited implicit exceptions semantic + return getattr(space, self.name)(*args_w) + return sc_operator + + def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) oper = SpaceOperator(name, arity, symbol, pyfunc, pure, can_overflow=ovf) diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -8,21 +8,6 @@ args = [space.unwrap(arg) for arg in args_w] return space.import_name(*args) -def make_sc(oper): - def sc_operator(space, args_w): - if len(args_w) != oper.arity: - if oper is op.pow and len(args_w) == 2: - args_w = args_w + [Constant(None)] - elif oper is op.getattr and len(args_w) == 3: - return space.frame.do_operation('simple_call', Constant(getattr), *args_w) - else: - raise Exception("should call %r with exactly %d arguments" % ( - oper.name, oper.arity)) - # completely replace the call with the underlying - # operation and its limited implicit exceptions semantic - return getattr(space, oper.name)(*args_w) - return sc_operator - # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: @@ -73,4 +58,4 @@ we_are_translated: sc_we_are_translated, locals: sc_locals} for fn, oper in func2op.items(): - SPECIAL_CASES[fn] = make_sc(oper) + SPECIAL_CASES[fn] = oper.make_sc() From noreply at buildbot.pypy.org Fri Jul 5 21:25:53 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 5 Jul 2013 21:25:53 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Split off a builtins_exceptions dict from operation.implicit_exceptions Message-ID: <20130705192553.81BEC1C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan 
Lamy Branch: flowoperators Changeset: r65227:c5ec93a944ef Date: 2013-07-05 21:25 +0200 http://bitbucket.org/pypy/pypy/changeset/c5ec93a944ef/ Log: Split off a builtins_exceptions dict from operation.implicit_exceptions The keys of implicit_exceptions were a mixture of builtin callables and operator names. Now both dicts are homogenous. diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -45,6 +45,16 @@ } } +# built-ins that can always raise exceptions +builtins_exceptions = { + int: [ValueError], + float: [ValueError], + chr: [ValueError], + unichr: [ValueError], + unicode: [UnicodeDecodeError], +} + + def _assert_rpythonic(func): """Raise ValueError if ``func`` is obviously not RPython""" if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): @@ -365,15 +375,6 @@ args_w = args.arguments_w w_res = self.frame.do_operation('simple_call', w_callable, *args_w) - # maybe the call has generated an exception (any one) - # but, let's say, not if we are calling a built-in class or function - # because this gets in the way of the special-casing of - # - # raise SomeError(x) - # - # as shown by test_objspace.test_raise3. - - exceptions = [Exception] # *any* exception by default if isinstance(w_callable, Constant): c = w_callable.value if (isinstance(c, (types.BuiltinFunctionType, @@ -381,8 +382,11 @@ types.ClassType, types.TypeType)) and c.__module__ in ['__builtin__', 'exceptions']): - exceptions = operation.implicit_exceptions.get(c) - self.frame.handle_implicit_exceptions(exceptions) + if c in builtins_exceptions: + self.frame.handle_implicit_exceptions(builtins_exceptions[c]) + return w_res + # *any* exception for non-builtins + self.frame.handle_implicit_exceptions([Exception]) return w_res def find_global(self, w_globals, varname): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -253,11 +253,6 @@ } implicit_exceptions = { - int: [ValueError], # built-ins that can always raise exceptions - float: [ValueError], - chr: [ValueError], - unichr: [ValueError], - unicode: [UnicodeDecodeError], # specifying IndexError, and KeyError beyond Exception, # allows the annotator to be more precise, see test_reraiseAnything/KeyError in # the annotator tests From noreply at buildbot.pypy.org Sat Jul 6 02:47:28 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 02:47:28 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Fix FlowObjSpace.import_from() Message-ID: <20130706004728.CD0F41C2FE8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65228:fd3bd5cf5c91 Date: 2013-07-06 01:16 +0200 http://bitbucket.org/pypy/pypy/changeset/fd3bd5cf5c91/ Log: Fix FlowObjSpace.import_from() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -19,7 +19,7 @@ from rpython.flowspace.pygraph import PyGraph from rpython.flowspace.specialcase import SPECIAL_CASES from rpython.rlib.unroll import unrolling_iterable, _unroller -from rpython.rlib import rstackovf, rarithmetic +from rpython.rlib import rstackovf from rpython.rlib.rarithmetic import is_valid_int @@ -323,10 +323,10 @@ assert isinstance(w_name, Constant) # handle sys if w_module in self.not_really_const: - const_w = self.not_really_const[w_obj] + const_w = self.not_really_const[w_module] if w_name not in 
const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', - w_obj, w_name) + w_module, w_name) try: return self.wrap(getattr(w_module.value, w_name.value)) except AttributeError: diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py, sys +import py from contextlib import contextmanager from rpython.flowspace.model import Constant, mkentrymap, c_last_exception @@ -1177,6 +1177,20 @@ graph = self.codetest(f) assert 'setitem' in self.all_operations(graph) + def test_sys_getattr(self): + def f(): + import sys + return sys.modules + graph = self.codetest(f) + assert 'getattr' in self.all_operations(graph) + + def test_sys_import_from(self): + def f(): + from sys import modules + return modules + graph = self.codetest(f) + assert 'getattr' in self.all_operations(graph) + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Sat Jul 6 02:47:30 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 02:47:30 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Kill operation.implicit_exceptions Message-ID: <20130706004730.210551C3000@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65229:99b641e81673 Date: 2013-07-06 02:46 +0200 http://bitbucket.org/pypy/pypy/changeset/99b641e81673/ Log: Kill operation.implicit_exceptions diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -14,7 +14,7 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) -from rpython.flowspace.operation import implicit_exceptions +from rpython.flowspace.operation import op class FlowingError(Exception): @@ -228,6 +228,7 @@ w_exc_cls, w_exc_value = egg.inputargs[-2:] if isinstance(egg.last_exception, Constant): w_exc_cls = egg.last_exception + assert not isinstance(w_exc_cls.value, list) raise ImplicitOperationError(w_exc_cls, w_exc_value) # ____________________________________________________________ @@ -464,7 +465,8 @@ def do_operation_with_implicit_exceptions(self, name, *args_w): w_result = self.do_operation(name, *args_w) - self.handle_implicit_exceptions(implicit_exceptions.get(name)) + oper = getattr(op, name) + self.handle_implicit_exceptions(oper.canraise) return w_result def handle_implicit_exceptions(self, exceptions): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -24,6 +24,7 @@ self.pyfunc = pyfunc self.pure = pure self.can_overflow = can_overflow + self.canraise = [] def make_sc(self): def sc_operator(space, args_w): @@ -252,19 +253,18 @@ ValueError: 'val', } -implicit_exceptions = { - # specifying IndexError, and KeyError beyond Exception, - # allows the annotator to be more precise, see test_reraiseAnything/KeyError in - # the annotator tests - 'getitem': [IndexError, KeyError, Exception], - 'setitem': [IndexError, KeyError, Exception], - 'delitem': [IndexError, KeyError, Exception], - 'contains': [Exception], # from an r_dict - } +# specifying IndexError, and KeyError beyond Exception, +# allows the annotator to be more precise, see test_reraiseAnything/KeyError in +# the annotator tests +op.getitem.canraise = [IndexError, KeyError, Exception] +op.setitem.canraise = 
[IndexError, KeyError, Exception] +op.delitem.canraise = [IndexError, KeyError, Exception] +op.contains.canraise = [Exception] # from an r_dict def _add_exceptions(names, exc): for name in names.split(): - lis = implicit_exceptions.setdefault(name, []) + oper = getattr(op, name) + lis = oper.canraise if exc in lis: raise ValueError, "your list is causing duplication!" lis.append(exc) @@ -273,12 +273,13 @@ def _add_except_ovf(names): # duplicate exceptions and add OverflowError for name in names.split(): - lis = implicit_exceptions.setdefault(name, [])[:] - lis.append(OverflowError) - implicit_exceptions[name+"_ovf"] = lis + oper = getattr(op, name) + oper_ovf = getattr(op, name+'_ovf') + oper_ovf.canraise = list(oper.canraise) + oper_ovf.canraise.append(OverflowError) _add_exceptions("""div mod divmod truediv floordiv pow - inplace_div inplace_mod inplace_divmod inplace_truediv + inplace_div inplace_mod inplace_truediv inplace_floordiv inplace_pow""", ZeroDivisionError) _add_exceptions("""pow inplace_pow lshift inplace_lshift rshift inplace_rshift""", ValueError) @@ -287,7 +288,7 @@ inplace_floordiv inplace_div inplace_mod inplace_pow inplace_lshift""", OverflowError) # without a _ovf version _add_except_ovf("""neg abs add sub mul - floordiv div mod pow lshift""") # with a _ovf version + floordiv div mod lshift""") # with a _ovf version _add_exceptions("""pow""", OverflowError) # for the float case del _add_exceptions, _add_except_ovf diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -120,7 +120,8 @@ covf = Constant(rarithmetic.ovfcheck) def check_syntax(opname): - exlis = operation.implicit_exceptions.get("%s_ovf" % (opname,), []) + oper = getattr(operation.op, opname + "_ovf") + exlis = oper.canraise if OverflowError not in exlis: raise Exception("ovfcheck in %s: Operation %s has no" " overflow variant" % (graph.name, opname)) @@ -495,11 +496,11 @@ # look for removable operations whose result is never used for i in range(len(block.operations)-1, -1, -1): op = block.operations[i] - if op.result not in read_vars: + if op.result not in read_vars: if canremove(op, block): del block.operations[i] - elif op.opname == 'simple_call': - # XXX we want to have a more effective and safe + elif op.opname == 'simple_call': + # XXX we want to have a more effective and safe # way to check if this operation has side effects # ... if op.args and isinstance(op.args[0], Constant): @@ -626,7 +627,7 @@ while candidates: cand, tgts = candidates.pop() - newexits = list(cand.exits) + newexits = list(cand.exits) for case, tgt in tgts: exit = cand.exits[case] rrenaming = dict(zip(tgt.inputargs,exit.args)) From noreply at buildbot.pypy.org Sat Jul 6 11:10:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 11:10:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: Typo Message-ID: <20130706091025.CA56D1C05DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r352:62b79f9f4e7c Date: 2013-07-06 11:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/62b79f9f4e7c/ Log: Typo diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -485,7 +485,7 @@ we may occasionally see a PUBLIC object --- one that was a private/protected object when it was added to old_objects_to_trace, and has been stolen. So we have to - check and not do any change the obj->h_tid in that case. + check and not do any change to the obj->h_tid in that case. 
Otherwise this conflicts with the rule that we may only modify obj->h_tid of a public object in order to add PUBLIC_TO_PRIVATE. From noreply at buildbot.pypy.org Sat Jul 6 11:10:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 11:10:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: Do we need to trace at all? The tests pass like this Message-ID: <20130706091024.8164F1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r351:840ac0a8867a Date: 2013-07-06 11:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/840ac0a8867a/ Log: Do we need to trace at all? The tests pass like this diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -225,8 +225,8 @@ id_copy->h_tid |= GCFLAG_VISITED; /* XXX: may not always need tracing? */ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); + //if (!(id_copy->h_tid & GCFLAG_STUB)) + // gcptrlist_insert(&objects_to_trace, id_copy); } else { /* prebuilt originals won't get collected anyway From noreply at buildbot.pypy.org Sat Jul 6 11:10:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 11:10:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: Kill the undolog. It was done for the purpose of thread-local refs, Message-ID: <20130706091027.260B71C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r353:617a7397cc2d Date: 2013-07-06 11:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/617a7397cc2d/ Log: Kill the undolog. It was done for the purpose of thread-local refs, but now this uses 'old_thread_local_obj' playing the role of a one-item undolog. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -817,20 +817,6 @@ } #endif -#if 0 - /* run the undo log in reverse order, cancelling the values set by - stm_ThreadLocalRef_LLSet(). */ - if (d->undolog.size > 0) { - gcptr *item = d->undolog.items; - long i; - for (i=d->undolog.size; i>=0; i-=2) { - void **addr = (void **)(item[i-2]); - void *oldvalue = (void *)(item[i-1]); - *addr = oldvalue; - } - } -#endif - /* upon abort, set the reads size limit to 94% of how much was read so far. This should ensure that, assuming the retry does the same thing, it will commit just before it reaches the conflicting point. 
@@ -937,7 +923,6 @@ d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); #if 0 - gcptrlist_clear(&d->undolog); gcptrlist_clear(&d->abortinfo); #endif } @@ -1496,17 +1481,6 @@ /************************************************************/ -#if 0 -void stm_ThreadLocalRef_LLSet(void **addr, void *newvalue) -{ - struct tx_descriptor *d = thread_descriptor; - gcptrlist_insert2(&d->undolog, (gcptr)addr, (gcptr)*addr); - *addr = newvalue; -} -#endif - -/************************************************************/ - struct tx_descriptor *stm_tx_head = NULL; struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; static revision_t descriptor_array_free_list = 0; @@ -1638,7 +1612,6 @@ #if 0 gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); - gcptrlist_delete(&d->undolog); #endif int num_aborts = 0, num_spinloops = 0; From noreply at buildbot.pypy.org Sat Jul 6 11:27:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 11:27:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: Move the stm_hash/stm_id/stm_pointer_equal to a new file, extra.c, Message-ID: <20130706092728.0093A1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r354:70f314b926f7 Date: 2013-07-06 11:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/70f314b926f7/ Log: Move the stm_hash/stm_id/stm_pointer_equal to a new file, extra.c, whose purpose is to contain "non-core" things offered in stmgc.h. diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -16,10 +16,10 @@ H_FILES = atomic_ops.h stmgc.h stmimpl.h \ et.h lists.h steal.h nursery.h gcpage.h \ - stmsync.h dbgmem.h fprintcolor.h + stmsync.h extra.h dbgmem.h fprintcolor.h C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ - stmsync.c dbgmem.c fprintcolor.c + stmsync.c extra.c dbgmem.c fprintcolor.c DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 diff --git a/c4/extra.c b/c4/extra.c new file mode 100644 --- /dev/null +++ b/c4/extra.c @@ -0,0 +1,139 @@ +#include "stmimpl.h" + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id) +{ + //assert(!is_in_nursery(thread_descriptor, id)); + assert(id->h_tid & GCFLAG_OLD); + + size_t size = stmgc_size(obj); + memcpy(id, obj, size); + id->h_tid &= ~GCFLAG_HAS_ID; + id->h_tid |= GCFLAG_OLD; + dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); +} + +/************************************************************/ +/* Each object has a h_original pointer to an old copy of + the same object (e.g. an old revision), the "original". + The memory location of this old object is used as the ID + for this object. If h_original is NULL *and* it is an + old object copy, it itself is the original. This invariant + must be upheld by all code dealing with h_original. + The original copy must never be moved again. Also, it may + be just a stub-object. + + If we want the ID of an object which is still young, + we must preallocate an old shadow-original that is used + as the target of the young object in a minor collection. + In this case, we set the HAS_ID flag on the young obj + to notify minor_collect. + This flag can be lost if the young obj is stolen. Then + the stealing thread uses the shadow-original itself and + minor_collect must not overwrite it again. + Also, if there is already a backup-copy around, we use + this instead of allocating another old object to use as + the shadow-original. + */ + +static revision_t mangle_hash(revision_t n) +{ + /* To hash pointers in dictionaries. 
Assumes that i shows some + alignment (to 4, 8, maybe 16 bytes), so we use the following + formula to avoid the trailing bits being always 0. + This formula is reversible: two different values of 'i' will + always give two different results. + */ + return n ^ (((urevision_t)n) >> 4); +} + + +revision_t stm_hash(gcptr p) +{ + /* Prebuilt objects may have a specific hash stored in an extra + field. For now, we will simply always follow h_original and + see, if it is a prebuilt object (XXX: maybe propagate a flag + to all copies of a prebuilt to avoid this cache miss). + */ + if (p->h_original) { + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + return p->h_original; + } + gcptr orig = (gcptr)p->h_original; + if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { + return orig->h_original; + } + } + return mangle_hash(stm_id(p)); +} + + +revision_t stm_id(gcptr p) +{ + struct tx_descriptor *d = thread_descriptor; + revision_t result; + + if (p->h_original) { /* fast path */ + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + /* h_original may contain a specific hash value, + but in case of the prebuilt original version, + its memory location is the id */ + return (revision_t)p; + } + + dprintf(("stm_id(%p) has orig fst: %p\n", + p, (gcptr)p->h_original)); + return p->h_original; + } + else if (p->h_tid & GCFLAG_OLD) { + /* old objects must have an h_original xOR be + the original itself. */ + dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); + return (revision_t)p; + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'I'); + /* old objects must have an h_original xOR be + the original itself. + if some thread stole p when it was still young, + it must have set h_original. stealing an old obj + makes the old obj "original". + */ + if (p->h_original) { /* maybe now? */ + result = p->h_original; + dprintf(("stm_id(%p) has orig: %p\n", + p, (gcptr)p->h_original)); + } + else { + /* must create shadow original object XXX: or use + backup, if exists */ + + /* XXX use stmgcpage_malloc() directly, we don't need to copy + * the contents yet */ + gcptr O = stmgc_duplicate_old(p); + p->h_original = (revision_t)O; + p->h_tid |= GCFLAG_HAS_ID; + + if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + gcptr B = (gcptr)p->h_revision; + B->h_original = (revision_t)O; + } + + result = (revision_t)O; + dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); + } + + spinlock_release(d->public_descriptor->collection_lock); + return result; +} + +_Bool stm_pointer_equal(gcptr p1, gcptr p2) +{ + /* fast path for two equal pointers */ + if (p1 == p2) + return 1; + /* types must be the same */ + if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) + return 0; + return stm_id(p1) == stm_id(p2); +} diff --git a/c4/extra.h b/c4/extra.h new file mode 100644 --- /dev/null +++ b/c4/extra.h @@ -0,0 +1,8 @@ +#ifndef _SRCSTM_EXTRA_H +#define _SRCSTM_EXTRA_H + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id); + + +#endif diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -125,131 +125,6 @@ } /************************************************************/ -/* Each object has a h_original pointer to an old copy of - the same object (e.g. an old revision), the "original". - The memory location of this old object is used as the ID - for this object. If h_original is NULL *and* it is an - old object copy, it itself is the original. This invariant - must be upheld by all code dealing with h_original. - The original copy must never be moved again. 
Also, it may - be just a stub-object. - - If we want the ID of an object which is still young, - we must preallocate an old shadow-original that is used - as the target of the young object in a minor collection. - In this case, we set the HAS_ID flag on the young obj - to notify minor_collect. - This flag can be lost if the young obj is stolen. Then - the stealing thread uses the shadow-original itself and - minor_collect must not overwrite it again. - Also, if there is already a backup-copy around, we use - this instead of allocating another old object to use as - the shadow-original. - */ - -static revision_t mangle_hash(revision_t n) -{ - /* To hash pointers in dictionaries. Assumes that i shows some - alignment (to 4, 8, maybe 16 bytes), so we use the following - formula to avoid the trailing bits being always 0. - This formula is reversible: two different values of 'i' will - always give two different results. - */ - return n ^ (((urevision_t)n) >> 4); -} - - -revision_t stm_hash(gcptr p) -{ - /* Prebuilt objects may have a specific hash stored in an extra - field. For now, we will simply always follow h_original and - see, if it is a prebuilt object (XXX: maybe propagate a flag - to all copies of a prebuilt to avoid this cache miss). - */ - if (p->h_original) { - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - return p->h_original; - } - gcptr orig = (gcptr)p->h_original; - if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { - return orig->h_original; - } - } - return mangle_hash(stm_id(p)); -} - - -revision_t stm_id(gcptr p) -{ - struct tx_descriptor *d = thread_descriptor; - revision_t result; - - if (p->h_original) { /* fast path */ - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - /* h_original may contain a specific hash value, - but in case of the prebuilt original version, - its memory location is the id */ - return (revision_t)p; - } - - dprintf(("stm_id(%p) has orig fst: %p\n", - p, (gcptr)p->h_original)); - return p->h_original; - } - else if (p->h_tid & GCFLAG_OLD) { - /* old objects must have an h_original xOR be - the original itself. */ - dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); - return (revision_t)p; - } - - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); - /* old objects must have an h_original xOR be - the original itself. - if some thread stole p when it was still young, - it must have set h_original. stealing an old obj - makes the old obj "original". - */ - if (p->h_original) { /* maybe now? 
*/ - result = p->h_original; - dprintf(("stm_id(%p) has orig: %p\n", - p, (gcptr)p->h_original)); - } - else { - /* must create shadow original object XXX: or use - backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); - p->h_original = (revision_t)O; - p->h_tid |= GCFLAG_HAS_ID; - - if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - gcptr B = (gcptr)p->h_revision; - B->h_original = (revision_t)O; - } - - result = (revision_t)O; - dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); - } - - spinlock_release(d->public_descriptor->collection_lock); - return result; -} - -_Bool stm_pointer_equal(gcptr p1, gcptr p2) -{ - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); -} - -/************************************************************/ static inline gcptr create_old_object_copy(gcptr obj) { @@ -266,18 +141,6 @@ return fresh_old_copy; } -void copy_to_old_id_copy(gcptr obj, gcptr id) -{ - assert(!is_in_nursery(thread_descriptor, id)); - assert(id->h_tid & GCFLAG_OLD); - - size_t size = stmgc_size(obj); - memcpy(id, obj, size); - id->h_tid &= ~GCFLAG_HAS_ID; - id->h_tid |= GCFLAG_OLD; - dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); -} - static void visit_if_young(gcptr *root) { gcptr obj = *root; @@ -303,7 +166,7 @@ /* already has a place to go to */ gcptr id_obj = (gcptr)obj->h_original; - copy_to_old_id_copy(obj, id_obj); + stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; obj->h_tid &= ~GCFLAG_HAS_ID; } diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -1,8 +1,6 @@ #include "stmimpl.h" -void copy_to_old_id_copy(gcptr obj, gcptr id); - gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { assert(pd->collection_lock != 0); @@ -167,7 +165,7 @@ /* use id-copy for us */ O = (gcptr)L->h_original; L->h_tid &= ~GCFLAG_HAS_ID; - copy_to_old_id_copy(L, O); + stm_copy_to_old_id_copy(L, O); O->h_original = 0; } else { /* Copy the object out of the other thread's nursery, diff --git a/c4/stmgc.c b/c4/stmgc.c --- a/c4/stmgc.c +++ b/c4/stmgc.c @@ -9,5 +9,6 @@ #include "nursery.c" #include "gcpage.c" #include "stmsync.c" +#include "extra.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -12,7 +12,7 @@ # endif #endif -#ifdef _GC_DEBUG +#if defined(_GC_DEBUG) && !defined(DUMP_EXTRA) # if _GC_DEBUG >= 2 # define DUMP_EXTRA # endif @@ -35,5 +35,6 @@ #include "et.h" #include "steal.h" #include "stmsync.h" +#include "extra.h" #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -11,11 +11,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in "et.h lists.h steal.h nursery.h gcpage.h " - "stmsync.h dbgmem.h fprintcolor.h " + "stmsync.h extra.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in "et.c lists.c steal.c nursery.c gcpage.c " - "stmsync.c dbgmem.c fprintcolor.c".split()] + "stmsync.c extra.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): From noreply at buildbot.pypy.org Sat Jul 6 12:08:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 12:08:48 +0200 (CEST) Subject: [pypy-commit] stmgc 
default: In-progress Message-ID: <20130706100848.725241C05DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r356:5b802f21bafe Date: 2013-07-06 12:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/5b802f21bafe/ Log: In-progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -422,29 +422,6 @@ goto restart_all; } -#if 0 -void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset) -{ - return _direct_read_barrier((gcptr)G1, (gcptr)R_Container1, offset); -} -#endif - -gcptr stm_RepeatReadBarrier(gcptr O) -{ - abort();//XXX -#if 0 - // LatestGlobalRevision(O) would either return O or abort - // the whole transaction, so omitting it is not wrong - struct tx_descriptor *d = thread_descriptor; - gcptr L; - wlog_t *entry; - G2L_FIND(d->global_to_local, O, entry, return O); - L = entry->val; - assert(L->h_revision == stm_local_revision); - return L; -#endif -} - static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; @@ -749,11 +726,6 @@ smp_spinloop(); } -#if 0 -size_t _stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, - int abort_reason, char *output); -#endif - void AbortPrivateFromProtected(struct tx_descriptor *d); void AbortTransaction(int num) @@ -795,27 +767,24 @@ elapsed_time = 1; } -#if 0 - size_t size; if (elapsed_time >= d->longest_abort_info_time) { /* decode the 'abortinfo' and produce a human-readable summary in the string 'longest_abort_info' */ - size = _stm_decode_abort_info(d, elapsed_time, num, NULL); + size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL); free(d->longest_abort_info); d->longest_abort_info = malloc(size); if (d->longest_abort_info == NULL) d->longest_abort_info_time = 0; /* out of memory! */ else { - if (_stm_decode_abort_info(d, elapsed_time, + if (stm_decode_abort_info(d, elapsed_time, num, d->longest_abort_info) != size) stm_fatalerror("during stm abort: object mutated unexpectedly\n"); d->longest_abort_info_time = elapsed_time; } } -#endif /* upon abort, set the reads size limit to 94% of how much was read so far. This should ensure that, assuming the retry does the same diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -181,7 +181,6 @@ void SpinLoop(int); gcptr stm_DirectReadBarrier(gcptr); -gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -153,10 +153,12 @@ gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize); } -#if 0 size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, int abort_reason, char *output) { + return 1; +} +#if 0 /* re-encodes the abort info as a single string. For convenience (no escaping needed, no limit on integer sizes, etc.) we follow the bittorrent format. 
*/ @@ -233,14 +235,14 @@ } break; default: - fprintf(stderr, "Fatal RPython error: corrupted abort log\n"); - abort(); + stm_fatalerror("corrupted abort log\n"); } } } WRITE('e'); WRITE('\0'); /* final null character */ #undef WRITE +#undef WRITE_BUF return totalsize; } #endif diff --git a/c4/extra.h b/c4/extra.h --- a/c4/extra.h +++ b/c4/extra.h @@ -3,6 +3,7 @@ void stm_copy_to_old_id_copy(gcptr obj, gcptr id); - +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output); #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -65,7 +65,7 @@ long stm_atomic(long delta); int stm_enter_callback_call(void); void stm_leave_callback_call(int); - void stm_abort_info_push(void *obj, void *fieldoffsets); + void stm_abort_info_push(gcptr obj, long fieldoffsets[]); void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -13,7 +13,23 @@ def test_abort_info_stack(): p = nalloc(HDR) q = nalloc(HDR) - lib.stm_abort_info_push(p, ffi.cast("void *", 123)) - lib.stm_abort_info_push(q, ffi.cast("void *", 125)) + lib.stm_abort_info_push(p, ffi.cast("long *", 123)) + lib.stm_abort_info_push(q, ffi.cast("long *", 125)) lib.stm_abort_info_pop(2) # no real test here + +def test_inspect_abort_info_signed(): + py.test.skip("in-progress") + fo1 = ffi.new("long[]", [-2, 1, HDR, -1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc(HDR + WORD) + lib.setlong(p, 0, -421289712) + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c) == "???" From noreply at buildbot.pypy.org Sat Jul 6 12:08:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 12:08:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: Start to copy the abortinfo from pypy's stm-gc-2's rpyintf.c. Message-ID: <20130706100847.3F2B21C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r355:5544b0de06cf Date: 2013-07-06 11:39 +0200 http://bitbucket.org/pypy/stmgc/changeset/5544b0de06cf/ Log: Start to copy the abortinfo from pypy's stm-gc-2's rpyintf.c. 
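The changeset below introduces stm_abort_info_push(), stm_abort_info_pop() and stm_inspect_abort_info(), plus a bencode-style encoder that walks a caller-supplied array of field descriptions: a positive kind followed by a byte offset (1 = signed long, 2 = unsigned long, 3 = string pointer), with -2/-1 opening and closing a sublist and 0 terminating the list. A minimal caller-side sketch, where the object layout and COUNTER_OFS are purely hypothetical and only the stm_* calls and the encoding come from the patch:

    #include <stdio.h>
    #include "stmgc.h"                  /* gcptr, stm_abort_info_*, stm_inspect_abort_info */

    #define COUNTER_OFS 16              /* hypothetical byte offset of a signed field */

    /* description array: open sublist, one signed long at COUNTER_OFS,
       close sublist, terminator -- the encoding used by the decoder below */
    static long counter_fields[] = { -2, 1, COUNTER_OFS, -1, 0 };

    static void fragile_part(gcptr obj)
    {
        stm_abort_info_push(obj, counter_fields);
        /* ... transactional work that is likely to abort ... */
        stm_abort_info_pop(1);          /* drop the single entry pushed above */
    }

    static void report_aborts(void)
    {
        char *info = stm_inspect_abort_info();   /* longest abort recorded, or NULL */
        if (info)
            fprintf(stderr, "%s\n", info);
    }
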
diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -922,9 +922,7 @@ d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); -#if 0 gcptrlist_clear(&d->abortinfo); -#endif } void BeginTransaction(jmp_buf* buf) @@ -1609,10 +1607,8 @@ assert(d->private_from_protected.size == 0); gcptrlist_delete(&d->private_from_protected); gcptrlist_delete(&d->list_of_read_objects); -#if 0 gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); -#endif int num_aborts = 0, num_spinloops = 0; char line[256], *p = line; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -152,9 +152,9 @@ unsigned int num_aborts[ABORT_REASONS]; unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; - //struct GcPtrList abortinfo; struct GcPtrList private_from_protected; struct G2L public_to_private; + struct GcPtrList abortinfo; char *longest_abort_info; long long longest_abort_info_time; revision_t *private_revision_ref; diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -137,3 +137,119 @@ return 0; return stm_id(p1) == stm_id(p2); } + +/************************************************************/ + +void stm_abort_info_push(gcptr obj, long fieldoffsets[]) +{ + struct tx_descriptor *d = thread_descriptor; + gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets); +} + +void stm_abort_info_pop(long count) +{ + struct tx_descriptor *d = thread_descriptor; + long newsize = d->abortinfo.size - 2 * count; + gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize); +} + +#if 0 +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output) +{ + /* re-encodes the abort info as a single string. + For convenience (no escaping needed, no limit on integer + sizes, etc.) we follow the bittorrent format. 
*/ + size_t totalsize = 0; + long i; + char buffer[32]; + size_t res_size; +#define WRITE(c) { totalsize++; if (output) *output++=(c); } +#define WRITE_BUF(p, sz) { totalsize += (sz); \ + if (output) { \ + memcpy(output, (p), (sz)); output += (sz); \ + } \ + } + WRITE('l'); + WRITE('l'); + res_size = sprintf(buffer, "i%llde", (long long)elapsed_time); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)abort_reason); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->atomic); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)d->active); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", + (unsigned long)d->reads_size_limit_nonatomic); + WRITE_BUF(buffer, res_size); + WRITE('e'); + for (i=0; iabortinfo.size; i+=2) { + char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + long *fieldoffsets = (long*)d->abortinfo.items[i+1]; + long kind, offset; + size_t rps_size; + RPyString *rps; + + while (1) { + kind = *fieldoffsets++; + if (kind <= 0) { + if (kind == -2) { + WRITE('l'); /* '[', start of sublist */ + continue; + } + if (kind == -1) { + WRITE('e'); /* ']', end of sublist */ + continue; + } + break; /* 0, terminator */ + } + offset = *fieldoffsets++; + switch(kind) { + case 1: /* signed */ + res_size = sprintf(buffer, "i%lde", + *(long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 2: /* unsigned */ + res_size = sprintf(buffer, "i%lue", + *(unsigned long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 3: /* pointer to STR */ + rps = *(RPyString **)(object + offset); + if (rps) { + rps_size = RPyString_Size(rps); + res_size = sprintf(buffer, "%zu:", rps_size); + WRITE_BUF(buffer, res_size); + WRITE_BUF(_RPyString_AsString(rps), rps_size); + } + else { + WRITE_BUF("0:", 2); + } + break; + default: + fprintf(stderr, "Fatal RPython error: corrupted abort log\n"); + abort(); + } + } + } + WRITE('e'); + WRITE('\0'); /* final null character */ +#undef WRITE + return totalsize; +} +#endif + +char *stm_inspect_abort_info(void) +{ + struct tx_descriptor *d = thread_descriptor; + if (d->longest_abort_info_time <= 0) + return NULL; + d->longest_abort_info_time = 0; + return d->longest_abort_info; +} diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -101,6 +101,18 @@ It is set to NULL by stm_initialize(). */ extern __thread gcptr stm_thread_local_obj; +/* For tracking where aborts occurs, you can push/pop information + into this stack. When an abort occurs this information is encoded + and flattened into a buffer which can later be retrieved with + stm_inspect_abort_info(). 
(XXX details not documented yet) */ +void stm_abort_info_push(gcptr obj, long fieldoffsets[]); +void stm_abort_info_pop(long count); +char *stm_inspect_abort_info(void); + + + +/**************** END OF PUBLIC INTERFACE *****************/ +/************************************************************/ /* macro-like functionality */ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -65,6 +65,9 @@ long stm_atomic(long delta); int stm_enter_callback_call(void); void stm_leave_callback_call(int); + void stm_abort_info_push(void *obj, void *fieldoffsets); + void stm_abort_info_pop(long count); + char *stm_inspect_abort_info(void); /* extra non-public code */ void printfcolor(char *msg); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py new file mode 100644 --- /dev/null +++ b/c4/test/test_extra.py @@ -0,0 +1,19 @@ +import py +from support import * + + +def setup_function(f): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) + +def teardown_function(_): + lib.stm_finalize() + + +def test_abort_info_stack(): + p = nalloc(HDR) + q = nalloc(HDR) + lib.stm_abort_info_push(p, ffi.cast("void *", 123)) + lib.stm_abort_info_push(q, ffi.cast("void *", 125)) + lib.stm_abort_info_pop(2) + # no real test here From noreply at buildbot.pypy.org Sat Jul 6 16:49:30 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 16:49:30 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: The objective of this branch is to completely remove the oo typesystem Message-ID: <20130706144930.1B46E1C00B9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65230:023336d4e39c Date: 2013-07-06 15:57 +0200 http://bitbucket.org/pypy/pypy/changeset/023336d4e39c/ Log: The objective of this branch is to completely remove the oo typesystem and oo backends. 
From noreply at buildbot.pypy.org Sat Jul 6 16:49:31 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 16:49:31 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove ootype support from the annotator Message-ID: <20130706144931.77CA21C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65231:eee1c17aebdf Date: 2013-07-06 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/eee1c17aebdf/ Log: Remove ootype support from the annotator diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -866,12 +866,8 @@ # ____________________________________________________________ # annotation of low-level types -from rpython.annotator.model import SomePtr, SomeOOInstance, SomeOOClass -from rpython.annotator.model import SomeOOObject +from rpython.annotator.model import SomePtr from rpython.annotator.model import ll_to_annotation, annotation_to_lltype -from rpython.rtyper.ootypesystem import ootype - -_make_none_union('SomeOOInstance', 'ootype=obj.ootype, can_be_None=True') class __extend__(pairtype(SomePtr, SomePtr)): def union((p1, p2)): @@ -912,41 +908,6 @@ return pair(p2, obj).union() -class __extend__(pairtype(SomeOOInstance, SomeOOInstance)): - def union((r1, r2)): - common = ootype.commonBaseclass(r1.ootype, r2.ootype) - assert common is not None, 'Mixing of incompatible instances %r, %r' %(r1.ootype, r2.ootype) - return SomeOOInstance(common, can_be_None=r1.can_be_None or r2.can_be_None) - -class __extend__(pairtype(SomeOOClass, SomeOOClass)): - def union((r1, r2)): - if r1.ootype is None: - common = r2.ootype - elif r2.ootype is None: - common = r1.ootype - elif r1.ootype == r2.ootype: - common = r1.ootype - elif isinstance(r1.ootype, ootype.Instance) and isinstance(r2.ootype, ootype.Instance): - common = ootype.commonBaseclass(r1.ootype, r2.ootype) - assert common is not None, ('Mixing of incompatible classes %r, %r' - % (r1.ootype, r2.ootype)) - else: - common = ootype.Object - return SomeOOClass(common) - -class __extend__(pairtype(SomeOOInstance, SomeObject)): - def union((r, obj)): - assert False, ("mixing reference type %r with something else %r" % (r.ootype, obj)) - -class __extend__(pairtype(SomeObject, SomeOOInstance)): - def union((obj, r2)): - return pair(r2, obj).union() - -class __extend__(pairtype(SomeOOObject, SomeOOObject)): - def union((r1, r2)): - assert r1.ootype is ootype.Object and r2.ootype is ootype.Object - return SomeOOObject() - #_________________________________________ # weakrefs diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -7,13 +7,12 @@ import sys, types, inspect, weakref from rpython.flowspace.model import Constant -from rpython.annotator.model import SomeString, SomeChar, SomeFloat, \ - SomePtr, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, \ - SomeInteger, SomeOOInstance, SomeOOObject, TLS, SomeAddress, \ - SomeUnicodeCodePoint, SomeOOStaticMeth, s_None, s_ImpossibleValue, \ - SomeLLADTMeth, SomeBool, SomeTuple, SomeOOClass, SomeImpossibleValue, \ - SomeUnicodeString, SomeList, HarmlesslyBlocked, \ - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray +from rpython.annotator.model import ( + SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, + s_None, s_ImpossibleValue, SomeLLADTMeth, 
SomeBool, SomeTuple, + SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, + SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -23,7 +22,6 @@ from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper import extregistry @@ -433,16 +431,6 @@ result = SomePtr(lltype.typeOf(x)) elif isinstance(x, llmemory.fakeaddress): result = SomeAddress() - elif isinstance(x, ootype._static_meth): - result = SomeOOStaticMeth(ootype.typeOf(x)) - elif isinstance(x, ootype._class): - result = SomeOOClass(x._INSTANCE) # NB. can be None - elif isinstance(x, ootype.instance_impl): # XXX - result = SomeOOInstance(ootype.typeOf(x)) - elif isinstance(x, (ootype._record, ootype._string)): - result = SomeOOInstance(ootype.typeOf(x)) - elif isinstance(x, (ootype._object)): - result = SomeOOObject() elif tp is type: if (x is type(None) or # add cases here if needed x.__module__ == 'rpython.rtyper.lltypesystem.lltype'): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -3,20 +3,15 @@ """ import sys -from rpython.annotator.model import SomeInteger, SomeObject, SomeChar, SomeBool -from rpython.annotator.model import SomeString, SomeTuple, s_Bool -from rpython.annotator.model import SomeUnicodeCodePoint, SomeAddress -from rpython.annotator.model import SomeFloat, unionof, SomeUnicodeString -from rpython.annotator.model import SomePBC, SomeInstance, SomeDict, SomeList -from rpython.annotator.model import SomeWeakRef, SomeIterator -from rpython.annotator.model import SomeOOObject, SomeByteArray -from rpython.annotator.model import annotation_to_lltype, lltype_to_annotation, ll_to_annotation -from rpython.annotator.model import add_knowntypedata -from rpython.annotator.model import s_ImpossibleValue +from rpython.annotator.model import ( + SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, + SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, + SomeByteArray, annotation_to_lltype, lltype_to_annotation, + ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant -from rpython.tool.error import AnnotatorError import rpython.rlib.rarithmetic import rpython.rlib.objectmodel @@ -177,8 +172,8 @@ r.const = isinstance(s_obj.const, typ) elif our_issubclass(s_obj.knowntype, typ): if not s_obj.can_be_none(): - r.const = True - elif not our_issubclass(typ, s_obj.knowntype): + r.const = True + elif not our_issubclass(typ, s_obj.knowntype): r.const = False elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation # from bool to int, notice that isinstance( , bool|int) @@ -207,7 +202,7 @@ r.const = hasattr(s_obj.const, s_attr.const) elif (isinstance(s_obj, SomePBC) and s_obj.getKind() is description.FrozenDesc): - answers = {} + answers = {} for d in s_obj.descriptions: answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue) answers[answer] = True @@ -344,7 +339,7 @@ return SomeAddress() 
def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" def test(*args): return s_Bool @@ -395,7 +390,7 @@ if hasattr(object.__init__, 'im_func'): BUILTIN_ANALYZERS[object.__init__.im_func] = object_init else: - BUILTIN_ANALYZERS[object.__init__] = object_init + BUILTIN_ANALYZERS[object.__init__] = object_init # import BUILTIN_ANALYZERS[__import__] = import_func @@ -490,7 +485,7 @@ return SomePtr(ll_ptrtype=PtrT.const) def identityhash(s_obj): - assert isinstance(s_obj, (SomePtr, SomeOOObject, SomeOOInstance)) + assert isinstance(s_obj, SomePtr) return SomeInteger() def getRuntimeTypeInfo(T): @@ -523,92 +518,6 @@ BUILTIN_ANALYZERS[lltype.runtime_type_info] = runtime_type_info BUILTIN_ANALYZERS[lltype.Ptr] = constPtr -# ootype -from rpython.annotator.model import SomeOOInstance, SomeOOClass, SomeOOStaticMeth -from rpython.rtyper.ootypesystem import ootype - -def new(I): - assert I.is_constant() - i = ootype.new(I.const) - r = SomeOOInstance(ootype.typeOf(i)) - return r - -def oonewarray(s_type, length): - assert s_type.is_constant() - return SomeOOInstance(s_type.const) - -def null(I_OR_SM): - assert I_OR_SM.is_constant() - null = ootype.null(I_OR_SM.const) - r = lltype_to_annotation(ootype.typeOf(null)) - return r - -def instanceof(i, I): - assert I.is_constant() - assert isinstance(I.const, ootype.Instance) - return s_Bool - -def classof(i): - assert isinstance(i, SomeOOInstance) - return SomeOOClass(i.ootype) - -def subclassof(class1, class2): - assert isinstance(class1, SomeOOClass) - assert isinstance(class2, SomeOOClass) - return s_Bool - -def runtimenew(c): - assert isinstance(c, SomeOOClass) - if c.ootype is None: - return s_ImpossibleValue # can't call runtimenew(NULL) - else: - return SomeOOInstance(c.ootype) - -def ooupcast(I, i): - assert isinstance(I.const, ootype.Instance) - if ootype.isSubclass(i.ootype, I.const): - return SomeOOInstance(I.const) - else: - raise AnnotatorError, 'Cannot cast %s to %s' % (i.ootype, I.const) - -def oodowncast(I, i): - assert isinstance(I.const, ootype.Instance) - if ootype.isSubclass(I.const, i.ootype): - return SomeOOInstance(I.const) - else: - raise AnnotatorError, 'Cannot cast %s to %s' % (i.ootype, I.const) - -def cast_to_object(obj): - assert isinstance(obj, SomeOOStaticMeth) or \ - (isinstance(obj, SomeOOClass) and obj.ootype is None) or \ - isinstance(obj.ootype, ootype.OOType) - return SomeOOObject() - -def cast_from_object(T, obj): - TYPE = T.const - if TYPE is ootype.Object: - return SomeOOObject() - elif TYPE is ootype.Class: - return SomeOOClass(ootype.ROOT) # ??? 
- elif isinstance(TYPE, ootype.StaticMethod): - return SomeOOStaticMeth(TYPE) - elif isinstance(TYPE, ootype.OOType): - return SomeOOInstance(TYPE) - else: - raise AnnotatorError, 'Cannot cast Object to %s' % TYPE - -BUILTIN_ANALYZERS[ootype.instanceof] = instanceof -BUILTIN_ANALYZERS[ootype.new] = new -BUILTIN_ANALYZERS[ootype.oonewarray] = oonewarray -BUILTIN_ANALYZERS[ootype.null] = null -BUILTIN_ANALYZERS[ootype.runtimenew] = runtimenew -BUILTIN_ANALYZERS[ootype.classof] = classof -BUILTIN_ANALYZERS[ootype.subclassof] = subclassof -BUILTIN_ANALYZERS[ootype.ooupcast] = ooupcast -BUILTIN_ANALYZERS[ootype.oodowncast] = oodowncast -BUILTIN_ANALYZERS[ootype.cast_to_object] = cast_to_object -BUILTIN_ANALYZERS[ootype.cast_from_object] = cast_from_object - #________________________________ # weakrefs diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -603,36 +603,6 @@ return False -class SomeOOObject(SomeObject): - def __init__(self): - from rpython.rtyper.ootypesystem import ootype - self.ootype = ootype.Object - - -class SomeOOClass(SomeObject): - def __init__(self, ootype): - self.ootype = ootype - - -class SomeOOInstance(SomeObject): - def __init__(self, ootype, can_be_None=False): - self.ootype = ootype - self.can_be_None = can_be_None - - -class SomeOOBoundMeth(SomeObject): - immutable = True - - def __init__(self, ootype, name): - self.ootype = ootype - self.name = name - - -class SomeOOStaticMeth(SomeObject): - immutable = True - - def __init__(self, method): - self.method = method annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), @@ -647,16 +617,6 @@ def annotation_to_lltype(s_val, info=None): - from rpython.rtyper.ootypesystem import ootype - - if isinstance(s_val, SomeOOInstance): - return s_val.ootype - if isinstance(s_val, SomeOOStaticMeth): - return s_val.method - if isinstance(s_val, SomeOOClass): - return ootype.Class - if isinstance(s_val, SomeOOObject): - return s_val.ootype if isinstance(s_val, SomeInteriorPtr): p = s_val.ll_ptrtype if 0 in p.offsets: @@ -683,8 +643,6 @@ def lltype_to_annotation(T): - from rpython.rtyper.ootypesystem import ootype - try: s = ll_to_annotation_map.get(T) except TypeError: @@ -694,14 +652,6 @@ return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) - if isinstance(T, (ootype.Instance, ootype.BuiltinType)): - return SomeOOInstance(T) - elif isinstance(T, ootype.StaticMethod): - return SomeOOStaticMeth(T) - elif T == ootype.Class: - return SomeOOClass(ootype.ROOT) - elif T == ootype.Object: - return SomeOOObject() elif isinstance(T, lltype.InteriorPtr): return SomeInteriorPtr(T) else: diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -2,7 +2,6 @@ from rpython.annotator.model import * from rpython.annotator.listdef import ListDef -from rpython.rtyper.ootypesystem import ootype listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -57,11 +56,11 @@ (s6, s6)]) def test_commonbase_simple(): - class A0: + class A0: pass - class A1(A0): + class A1(A0): pass - class A2(A0): + class A2(A0): pass class B1(object): pass @@ -73,10 +72,10 @@ except TypeError: # if A0 is also a new-style class, e.g. 
in PyPy class B3(A0, object): pass - assert commonbase(A1,A2) is A0 + assert commonbase(A1,A2) is A0 assert commonbase(A1,A0) is A0 assert commonbase(A1,A1) is A1 - assert commonbase(A2,B2) is object + assert commonbase(A2,B2) is object assert commonbase(A2,B3) is A0 def test_list_union(): @@ -115,9 +114,6 @@ assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) s_p = ll_to_annotation(lltype.malloc(A, 0)) assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) - C = ootype.Instance('C', ootype.ROOT, {}) - s_p = ll_to_annotation(ootype.new(C)) - assert isinstance(s_p, SomeOOInstance) and s_p.ootype == C def test_annotation_to_lltype(): from rpython.rlib.rarithmetic import r_uint, r_singlefloat @@ -125,8 +121,8 @@ s_pos = SomeInteger(nonneg=True) s_1 = SomeInteger(nonneg=True); s_1.const = 1 s_m1 = SomeInteger(nonneg=False); s_m1.const = -1 - s_u = SomeInteger(nonneg=True, unsigned=True); - s_u1 = SomeInteger(nonneg=True, unsigned=True); + s_u = SomeInteger(nonneg=True, unsigned=True); + s_u1 = SomeInteger(nonneg=True, unsigned=True); s_u1.const = r_uint(1) assert annotation_to_lltype(s_i) == lltype.Signed assert annotation_to_lltype(s_pos) == lltype.Signed @@ -140,13 +136,10 @@ s_p = SomePtr(ll_ptrtype=PS) assert annotation_to_lltype(s_p) == PS py.test.raises(ValueError, "annotation_to_lltype(si0)") - C = ootype.Instance('C', ootype.ROOT, {}) - ref = SomeOOInstance(C) - assert annotation_to_lltype(ref) == C s_singlefloat = SomeSingleFloat() s_singlefloat.const = r_singlefloat(0.0) assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat - + def test_ll_union(): PS1 = lltype.Ptr(lltype.GcStruct('s')) PS2 = lltype.Ptr(lltype.GcStruct('s')) @@ -172,29 +165,6 @@ py.test.raises(AssertionError, "unionof(SomeInteger(), SomePtr(PS1))") py.test.raises(AssertionError, "unionof(SomeObject(), SomePtr(PS1))") -def test_oo_union(): - C1 = ootype.Instance("C1", ootype.ROOT) - C2 = ootype.Instance("C2", C1) - C3 = ootype.Instance("C3", C1) - D = ootype.Instance("D", ootype.ROOT) - assert unionof(SomeOOInstance(C1), SomeOOInstance(C1)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C1), SomeOOInstance(C2)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C2), SomeOOInstance(C1)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C2), SomeOOInstance(C3)) == SomeOOInstance(C1) - - assert unionof(SomeOOInstance(C1),SomeImpossibleValue()) == SomeOOInstance(C1) - assert unionof(SomeImpossibleValue(), SomeOOInstance(C1)) == SomeOOInstance(C1) - - assert unionof(SomeOOInstance(C1), SomeOOInstance(D)) == SomeOOInstance(ootype.ROOT) - -def test_ooclass_array_contains(): - A = ootype.Array(ootype.Signed) - cls = ootype.runtimeClass(A) - s1 = SomeOOClass(A) - s2 = SomeOOClass(A) - s2.const=cls - assert s1.contains(s2) - def test_nan(): f1 = SomeFloat() f1.const = float("nan") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -758,7 +758,6 @@ # annotation of low-level types from rpython.annotator.model import SomePtr, SomeLLADTMeth -from rpython.annotator.model import SomeOOInstance, SomeOOBoundMeth, SomeOOStaticMeth from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype class __extend__(SomePtr): @@ -813,56 +812,6 @@ s_func = bookkeeper.immutablevalue(adtmeth.func) return s_func.call(args.prepend(lltype_to_annotation(adtmeth.ll_ptrtype))) -from rpython.rtyper.ootypesystem import ootype -class __extend__(SomeOOInstance): - def 
getattr(r, s_attr): - assert s_attr.is_constant(), "getattr on ref %r with non-constant field-name" % r.ootype - v = getattr(r.ootype._example(), s_attr.const) - if isinstance(v, ootype._bound_meth): - return SomeOOBoundMeth(r.ootype, s_attr.const) - return ll_to_annotation(v) - - def setattr(r, s_attr, s_value): - assert s_attr.is_constant(), "setattr on ref %r with non-constant field-name" % r.ootype - v = annotation_to_lltype(s_value) - example = r.ootype._example() - if example is not None: - setattr(r.ootype._example(), s_attr.const, v._example()) - - def is_true(p): - return s_Bool - -class __extend__(SomeOOBoundMeth): - def simple_call(m, *args_s): - _, meth = m.ootype._lookup(m.name) - if isinstance(meth, ootype._overloaded_meth): - return meth._resolver.annotate(args_s) - else: - METH = ootype.typeOf(meth) - return lltype_to_annotation(METH.RESULT) - - def call(m, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level bound method") - inst = m.ootype._example() - _, meth = ootype.typeOf(inst)._lookup(m.name) - METH = ootype.typeOf(meth) - return lltype_to_annotation(METH.RESULT) - - -class __extend__(SomeOOStaticMeth): - - def call(m, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level static method") - info = 'argument to ll static method call' - llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] - v = m.method._example()(*llargs) - return ll_to_annotation(v) - - #_________________________________________ # weakrefs From noreply at buildbot.pypy.org Sat Jul 6 17:42:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 17:42:51 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove ootype support from rpython.rtyper.rtyper Message-ID: <20130706154251.6F4831C00B9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65232:64eb95059623 Date: 2013-07-06 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/64eb95059623/ Log: Remove ootype support from rpython.rtyper.rtyper diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -824,15 +824,7 @@ "classes with no common base: %r" % (mdescs,)) self.methodname = methodname - # for ootype, the right thing to do is to always keep the most precise - # type of the instance, while for lltype we want to cast it to the - # type where the method is actually defined. See also - # test_rclass.test_method_specialized_with_subclass and - # rtyper.attach_methods_to_subclasses - if self.rtyper.type_system.name == 'ootypesystem': - self.classdef = classdef - else: - self.classdef = classdef.locate_attribute(methodname) + self.classdef = classdef.locate_attribute(methodname) # the low-level representation is just the bound 'self' argument. 
self.s_im_self = annmodel.SomeInstance(self.classdef, flags=flags) self.r_im_self = rclass.getinstancerepr(rtyper, self.classdef) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -23,9 +23,8 @@ from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError -from rpython.rtyper.typesystem import LowLevelTypeSystem, ObjectOrientedTypeSystem +from rpython.rtyper.typesystem import LowLevelTypeSystem from rpython.tool.pairtype import pair from rpython.translator.unsimplify import insert_empty_block @@ -41,8 +40,6 @@ if isinstance(type_system, str): if type_system == "lltype": self.type_system = LowLevelTypeSystem.instance - elif type_system == "ootype": - self.type_system = ObjectOrientedTypeSystem.instance else: raise TyperError("Unknown type system %r!" % type_system) else: @@ -208,8 +205,6 @@ if self.exceptiondata is not None: self.exceptiondata.make_helpers(self) self.specialize_more_blocks() # for the helpers just made - if self.type_system.name == 'ootypesystem': - self.attach_methods_to_subclasses() def getannmixlevel(self): if self.annmixlevel is not None: @@ -275,40 +270,6 @@ if annmixlevel is not None: annmixlevel.finish() - def attach_methods_to_subclasses(self): - # in ootype, it might happen that a method is defined in the - # superclass but the annotator discovers that it's always called - # through instances of a subclass (e.g. because of specialization, see - # test_rclass.test_method_specialized_with_subclass). In that cases, - # we copy the method also in the ootype.Instance of the subclass, so - # that the type of v_self coincides with the type returned by - # _lookup(). - assert self.type_system.name == 'ootypesystem' - def allclasses(TYPE, seen): - '''Yield TYPE and all its subclasses''' - if TYPE in seen: - return - seen.add(TYPE) - yield TYPE - for SUB in TYPE._subclasses: - for T in allclasses(SUB, seen): - yield T - - for TYPE in allclasses(ootype.ROOT, set()): - for methname, meth in TYPE._methods.iteritems(): - try: - graph = meth.graph - except AttributeError: - continue - SELF = graph.getargs()[0].concretetype - if TYPE != SELF and ootype.isSubclass(SELF, TYPE): - # the annotator found that this method has a more precise - # type. 
Attach it to the proper subclass, so that the type - # of 'self' coincides with the type returned by _lookup(), - # else we might have type errors - if methname not in SELF._methods: - ootype.addMethods(SELF, {methname: meth}) - def dump_typererrors(self, num=None, minimize=True, to_log=False): c = 0 bc = 0 @@ -1015,4 +976,3 @@ from rpython.rtyper import rptr from rpython.rtyper import rweakref from rpython.rtyper import raddress # memory addresses -from rpython.rtyper.ootypesystem import rootype From noreply at buildbot.pypy.org Sat Jul 6 17:42:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 6 Jul 2013 17:42:52 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill rpython.rtyper.test.tool.OORtypeMixin and .LLRtypeMixin Message-ID: <20130706154252.B2D951C05DF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65233:ad923d70cccc Date: 2013-07-06 17:40 +0200 http://bitbucket.org/pypy/pypy/changeset/ad923d70cccc/ Log: Kill rpython.rtyper.test.tool.OORtypeMixin and .LLRtypeMixin Update some rlib tests diff --git a/rpython/rlib/rstruct/test/test_runpack.py b/rpython/rlib/rstruct/test/test_runpack.py --- a/rpython/rlib/rstruct/test/test_runpack.py +++ b/rpython/rlib/rstruct/test/test_runpack.py @@ -1,9 +1,9 @@ -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rarithmetic import LONG_BIT import struct -class BaseTestRStruct(BaseRtypingTest): +class TestRStruct(BaseRtypingTest): def test_unpack(self): pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes def fn(): @@ -37,9 +37,3 @@ return runpack(">d", "testtest") assert fn() == struct.unpack(">d", "testtest")[0] assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0] - -class TestLLType(BaseTestRStruct, LLRtypeMixin): - pass - -class TestOOType(BaseTestRStruct, OORtypeMixin): - pass diff --git a/rpython/rlib/test/test__jit_vref.py b/rpython/rlib/test/test__jit_vref.py --- a/rpython/rlib/test/test__jit_vref.py +++ b/rpython/rlib/test/test__jit_vref.py @@ -9,7 +9,7 @@ from rpython.rtyper.ootypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.ootypesystem import ootype @@ -91,7 +91,11 @@ assert s.s_instance.can_be_None assert s.s_instance.classdef == a.bookkeeper.getuniqueclassdef(X) -class BaseTestVRef(BaseRtypingTest): +class TestVRef(BaseRtypingTest): + OBJECTTYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + def test_rtype_1(self): def f(): return virtual_ref(X()) @@ -144,14 +148,3 @@ return vref.virtual x = self.interpret(f, []) assert x is False - - -class TestLLtype(BaseTestVRef, LLRtypeMixin): - OBJECTTYPE = OBJECTPTR - def castable(self, TO, var): - return lltype.castable(TO, lltype.typeOf(var)) > 0 - -class TestOOtype(BaseTestVRef, OORtypeMixin): - OBJECTTYPE = OBJECT - def castable(self, TO, var): - return ootype.isSubclass(lltype.typeOf(var), TO) diff --git a/rpython/rlib/test/test_debug.py b/rpython/rlib/test/test_debug.py --- a/rpython/rlib/test/test_debug.py +++ b/rpython/rlib/test/test_debug.py @@ -11,14 +11,14 @@ def test_check_annotation(): class Error(Exception): pass - + def checker(ann, bk): from rpython.annotator.model import SomeList, SomeInteger if not isinstance(ann, SomeList): raise 
Error() if not isinstance(ann.listdef.listitem.s_value, SomeInteger): raise Error() - + def f(x): result = [x] check_annotation(result, checker) @@ -50,7 +50,7 @@ result.append(4) return len(result) - py.test.raises(ListChangeUnallowed, interpret, f, [], + py.test.raises(ListChangeUnallowed, interpret, f, [], list_comprehension_operations=True) def test_mark_dict_non_null(): @@ -98,11 +98,5 @@ ]), ] - -class TestLLType(DebugTests): def interpret(self, f, args): return interpret(f, args, type_system='lltype') - -class TestOOType(DebugTests): - def interpret(self, f, args): - return interpret(f, args, type_system='ootype') diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -5,7 +5,7 @@ from rpython.rlib.jit import (hint, we_are_jitted, JitDriver, elidable_promote, JitHintError, oopspec, isconstant) from rpython.rlib.rarithmetic import r_uint -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem import lltype @@ -73,9 +73,9 @@ assert driver2.foo == 'bar' driver.foo = 'xxx' assert driver2.foo == 'bar' - -class BaseTestJIT(BaseRtypingTest): + +class TestJIT(BaseRtypingTest): def test_hint(self): def f(): x = hint(5, hello="world") @@ -109,11 +109,11 @@ return func + 1 def f(x): return g(x * 2, x) - + import dis from StringIO import StringIO import sys - + s = StringIO() prev = sys.stdout sys.stdout = s @@ -131,9 +131,9 @@ assert res == 5 def test_annotate_hooks(self): - + def get_printable_location(m): pass - + myjitdriver = JitDriver(greens=['m'], reds=['n'], get_printable_location=get_printable_location) def fn(n): @@ -247,10 +247,3 @@ # this used to fail on 64-bit, because r_uint == r_ulonglong myjitdriver = JitDriver(greens=['i1'], reds=[]) myjitdriver.jit_merge_point(i1=r_uint(42)) - - -class TestJITLLtype(BaseTestJIT, LLRtypeMixin): - pass - -class TestJITOOtype(BaseTestJIT, OORtypeMixin): - pass diff --git a/rpython/rlib/test/test_nonconst.py b/rpython/rlib/test/test_nonconst.py --- a/rpython/rlib/test/test_nonconst.py +++ b/rpython/rlib/test/test_nonconst.py @@ -17,8 +17,6 @@ s = a.build_types(nonconst_f, []) assert s.knowntype is int assert not hasattr(s, 'const') - #rtyper = a.translator.buildrtyper(type_system="ootype") - #rtyper.specialize() def test_nonconst_list(): @@ -41,8 +39,6 @@ a = RPythonAnnotator() s = a.build_types(nonconst_i, []) - rtyper = a.translator.buildrtyper(type_system="ootype") - rtyper.specialize() if option.view: a.translator.view() assert isinstance(s, SomeInstance) @@ -58,7 +54,5 @@ assert s.knowntype is bool assert not hasattr(s, 'const') - rtyper = a.translator.buildrtyper(type_system="ootype") - rtyper.specialize() if option.view: a.translator.view() diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -3,7 +3,7 @@ from rpython.rlib import types from rpython.annotator import model from rpython.translator.translator import TranslationContext, graphof -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret from rpython.conftest import option @@ -54,13 +54,13 @@ assert x == 42 count += 1 assert count == 1 - + count = 0 for x in d.iteritems(): assert len(x) == 2 and x[0] == 'hello' and x[1] 
== 42 count += 1 assert count == 1 - + d.clear() assert d.keys() == [] return True # for the tests below @@ -189,7 +189,7 @@ foo = Foo() assert current_object_addr_as_int(foo) == intmask(id(foo)) -class BaseTestObjectModel(BaseRtypingTest): +class TestObjectModel(BaseRtypingTest): def test_we_are_translated(self): assert we_are_translated() == False @@ -198,7 +198,7 @@ return we_are_translated() res = self.interpret(fn, []) assert res is True - + def test_rtype_r_dict(self): res = self.interpret(test_r_dict, []) assert res is True @@ -239,7 +239,7 @@ def _freeze_(self): return True obj = FreezingClass() - pbc_d = r_dict(obj.key_eq, obj.key_hash) + pbc_d = r_dict(obj.key_eq, obj.key_hash) def fn(): return play_with_r_dict(pbc_d) assert self.interpret(fn, []) is True @@ -356,8 +356,6 @@ assert self.interpret(fn, [15]) == 11 -class TestLLtype(BaseTestObjectModel, LLRtypeMixin): - def test_rtype_keepalive(self): from rpython.rlib import objectmodel def f(): @@ -395,21 +393,17 @@ h_None = compute_hash(None) h_tuple = compute_hash(("world", None, 42, 7.5)) h_q = compute_hash(q) - + res = self.interpret(f, [42]) assert res == 84 -class TestOOtype(BaseTestObjectModel, OORtypeMixin): - pass - - def test_specialize_decorator(): def f(): pass specialize.memo()(f) - + assert f._annspecialcase_ == 'specialize:memo' specialize.arg(0)(f) @@ -432,7 +426,7 @@ exc = py.test.raises(TypeError, "f(1, 2, 3)") assert exc.value.message == "f argument 'b' must be of type " py.test.raises(TypeError, "f('hello', 'world', 3)") - + def test_enforceargs_defaults(): @enforceargs(int, int) @@ -529,7 +523,7 @@ if llop.opname == 'malloc_varsize': break assert llop.args[2] is graph.startblock.inputargs[0] - + def test_resizelist_hint(): from rpython.annotator.model import SomeInteger def f(z): diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py --- a/rpython/rlib/test/test_rarithmetic.py +++ b/rpython/rlib/test/test_rarithmetic.py @@ -1,4 +1,4 @@ -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.test.test_llinterp import interpret from rpython.rlib.rarithmetic import * from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError @@ -68,7 +68,7 @@ left, right = types cmp = f(left(larg), right(rarg)) assert res == cmp - + class Test_r_uint: def test__add__(self): self.binary_test(lambda x, y: x + y) @@ -115,12 +115,12 @@ def unary_test(self, f): for arg in (0, 3, 12345): - res = f(arg) & maxint_mask + res = f(arg) & maxint_mask cmp = f(r_uint(arg)) assert res == cmp def binary_test(self, f, rargs = None, translated=False): - mask = maxint_mask + mask = maxint_mask if not rargs: rargs = (1, 3, 55) # when translated merging different int types is not allowed @@ -178,7 +178,7 @@ assert intmask(2*sys.maxint+1) == -1 assert intmask(sys.maxint*2) == -2 assert intmask(sys.maxint*2+2) == 0 - assert intmask(2*(sys.maxint*1+1)) == 0 + assert intmask(2*(sys.maxint*1+1)) == 0 assert intmask(1 << (machbits-1)) == 1 << (machbits-1) assert intmask(sys.maxint+1) == minint assert intmask(minint-1) == sys.maxint @@ -229,13 +229,13 @@ except OverflowError: assert False else: - pass + pass try: ovfcheck(n-n) except OverflowError: assert False else: - pass + pass # overflowing try: @@ -313,7 +313,7 @@ assert x != 2.5 py.test.raises(TypeError, "x>y") -class BaseTestRarithmetic(BaseRtypingTest): +class TestRarithmetic(BaseRtypingTest): def test_compare_singlefloat_crashes(self): from 
rpython.rlib.rarithmetic import r_singlefloat from rpython.rtyper.error import MissingRTypeOperation @@ -332,11 +332,6 @@ res = self.interpret(f, [123]) assert res == 4 + 2 -class TestLLtype(BaseTestRarithmetic, LLRtypeMixin): - pass - -class TestOOtype(BaseTestRarithmetic, OORtypeMixin): - pass def test_int_real_union(): from rpython.rtyper.lltypesystem.rffi import r_int_real diff --git a/rpython/rlib/test/test_rerased.py b/rpython/rlib/test/test_rerased.py --- a/rpython/rlib/test/test_rerased.py +++ b/rpython/rlib/test/test_rerased.py @@ -10,7 +10,7 @@ from rpython.rtyper.ootypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest def make_annotator(): a = RPythonAnnotator() @@ -186,6 +186,11 @@ assert isinstance(s, annmodel.SomeInteger) class BaseTestRErased(BaseRtypingTest): + ERASED_TYPE = llmemory.GCREF + UNERASED_TYPE = OBJECTPTR + def castable(self, TO, var): + return lltype.castable(TO, lltype.typeOf(var)) > 0 + def interpret(self, *args, **kwargs): kwargs["taggedpointers"] = True return BaseRtypingTest.interpret(self, *args, **kwargs) @@ -296,23 +301,6 @@ self.interpret(l, [1]) self.interpret(l, [2]) -class TestLLtype(BaseTestRErased, LLRtypeMixin): - ERASED_TYPE = llmemory.GCREF - UNERASED_TYPE = OBJECTPTR - def castable(self, TO, var): - return lltype.castable(TO, lltype.typeOf(var)) > 0 - -from rpython.rtyper.ootypesystem.ootype import Object - -class TestOOtype(BaseTestRErased, OORtypeMixin): - ERASED_TYPE = Object - UNERASED_TYPE = OBJECT - def castable(self, TO, var): - return ootype.isSubclass(lltype.typeOf(var), TO) - @py.test.mark.xfail - def test_prebuilt_erased(self): - super(TestOOtype, self).test_prebuilt_erased() - def test_union(): s_e1 = SomeErased() s_e1.const = 1 diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -1,7 +1,6 @@ import py, errno, sys from rpython.rlib import rsocket from rpython.rlib.rsocket import * -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin import socket as cpy_socket # cannot test error codes in Win32 because ll2ctypes doesn't save diff --git a/rpython/rlib/test/test_rzipfile.py b/rpython/rlib/test/test_rzipfile.py --- a/rpython/rlib/test/test_rzipfile.py +++ b/rpython/rlib/test/test_rzipfile.py @@ -3,7 +3,7 @@ from rpython.rlib.rzipfile import RZipFile from rpython.tool.udir import udir from zipfile import ZIP_STORED, ZIP_DEFLATED, ZipInfo, ZipFile -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest import os import time @@ -25,9 +25,9 @@ # Value selected to produce a CRC32 which is negative if # interpreted as a signed 32 bit integer. This exercises the # masking behavior necessary on 64 bit platforms. 
- zipfile.writestr("three", "hello, world") + zipfile.writestr("three", "hello, world") zipfile.close() - + def test_rzipfile(self): zipname = self.zipname year = self.year @@ -42,8 +42,8 @@ assert one() assert self.interpret(one, []) -class TestRZipFile(BaseTestRZipFile, LLRtypeMixin): +class TestRZipFile(BaseTestRZipFile): compression = ZIP_STORED -class TestRZipFileCompressed(BaseTestRZipFile, LLRtypeMixin): +class TestRZipFileCompressed(BaseTestRZipFile): compression = ZIP_DEFLATED diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -1,17 +1,16 @@ import py -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.test.test_llinterp import gengraph, interpret, interpret_raises class BaseRtypingTest(object): - + type_system = 'lltype' FLOAT_PRECISION = 8 def gengraph(self, func, argtypes=[], viewbefore='auto', policy=None, backendopt=False, config=None): return gengraph(func, argtypes, viewbefore, policy, type_system=self.type_system, backendopt=backendopt, config=config) - + def interpret(self, fn, args, **kwds): return interpret(fn, args, type_system=self.type_system, **kwds) @@ -38,12 +37,10 @@ def _skip_llinterpreter(self, reason, skipLL=True, skipOO=True): if skipLL and self.type_system == 'lltype': - py.test.skip("lltypesystem doesn't support %s, yet" % reason) + py.test.skip("lltypesystem doesn't support %s, yet" % reason) if skipOO and self.type_system == 'ootype': - py.test.skip("ootypesystem doesn't support %s, yet" % reason) + py.test.skip("ootypesystem doesn't support %s, yet" % reason) -class LLRtypeMixin(object): - type_system = 'lltype' def ll_to_string(self, s): if not s: @@ -54,11 +51,11 @@ return u''.join(s.chars) def string_to_ll(self, s): - from rpython.rtyper.module.support import LLSupport + from rpython.rtyper.module.support import LLSupport return LLSupport.to_rstr(s) def unicode_to_ll(self, s): - from rpython.rtyper.module.support import LLSupport + from rpython.rtyper.module.support import LLSupport return LLSupport.to_runicode(s) def ll_to_list(self, l): @@ -90,42 +87,3 @@ def is_of_instance_type(self, val): T = lltype.typeOf(val) return isinstance(T, lltype.Ptr) and isinstance(T.TO, lltype.GcStruct) - - -class OORtypeMixin(object): - type_system = 'ootype' - - def ll_to_string(self, s): - return s._str - - ll_to_unicode = ll_to_string - - def string_to_ll(self, s): - from rpython.rtyper.module.support import OOSupport - return OOSupport.to_rstr(s) - - def unicode_to_ll(self, u): - from rpython.rtyper.module.support import OOSupport - return OOSupport.to_runicode(u) - - def ll_to_list(self, l): - if hasattr(l, '_list'): - return l._list[:] - return l._array[:] - - def ll_unpack_tuple(self, t, length): - return tuple([getattr(t, 'item%d' % i) for i in range(length)]) - - def get_callable(self, sm): - return sm._callable - - def class_name(self, value): - return ootype.dynamicType(value)._name.split(".")[-1] - - def read_attr(self, value, attr): - value = ootype.oodowncast(ootype.dynamicType(value), value) - return getattr(value, "o" + attr) - - def is_of_instance_type(self, val): - T = lltype.typeOf(val) - return isinstance(T, ootype.Instance) From noreply at buildbot.pypy.org Sat Jul 6 18:52:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: Unsigned Message-ID: <20130706165231.D41511C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: Changeset: r358:cf5491788056 Date: 2013-07-06 15:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/cf5491788056/ Log: Unsigned diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -1,4 +1,4 @@ -import py +import py, sys from support import * @@ -32,3 +32,23 @@ c = lib.stm_inspect_abort_info() assert c assert ffi.string(c).endswith("eli-421289712eee") + +def test_inspect_abort_info_nested_unsigned(): + fo1 = ffi.new("long[]", [-2, 2, HDR, 0]) + fo2 = ffi.new("long[]", [2, HDR + WORD, -1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc(HDR + WORD) + q = nalloc(HDR + 2 * WORD) + lib.setlong(p, 0, sys.maxint) + lib.setlong(q, 1, -1) + lib.stm_abort_info_push(p, fo1) + lib.stm_abort_info_push(q, fo2) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("eli%dei%deee" % ( + sys.maxint, sys.maxint * 2 + 1)) From noreply at buildbot.pypy.org Sat Jul 6 18:52:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:30 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test and pass with integers Message-ID: <20130706165230.BD3D71C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r357:bca8a7e8a36a Date: 2013-07-06 15:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/bca8a7e8a36a/ Log: Test and pass with integers diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -156,9 +156,6 @@ size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, int abort_reason, char *output) { - return 1; -} -#if 0 /* re-encodes the abort info as a single string. For convenience (no escaping needed, no limit on integer sizes, etc.) we follow the bittorrent format. */ @@ -191,11 +188,11 @@ WRITE_BUF(buffer, res_size); WRITE('e'); for (i=0; iabortinfo.size; i+=2) { - char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + char *object = (char *)d->abortinfo.items[i+0]; long *fieldoffsets = (long*)d->abortinfo.items[i+1]; long kind, offset; size_t rps_size; - RPyString *rps; + char *rps; while (1) { kind = *fieldoffsets++; @@ -222,13 +219,14 @@ *(unsigned long*)(object + offset)); WRITE_BUF(buffer, res_size); break; - case 3: /* pointer to STR */ - rps = *(RPyString **)(object + offset); + case 3: /* a string of bytes from the target object */ + rps = *(char **)(object + offset); + offset = *fieldoffsets++; if (rps) { - rps_size = RPyString_Size(rps); + rps_size = stmcb_size((gcptr)rps) - offset; res_size = sprintf(buffer, "%zu:", rps_size); WRITE_BUF(buffer, res_size); - WRITE_BUF(_RPyString_AsString(rps), rps_size); + WRITE_BUF(rps + offset, rps_size); } else { WRITE_BUF("0:", 2); @@ -245,7 +243,6 @@ #undef WRITE_BUF return totalsize; } -#endif char *stm_inspect_abort_info(void) { diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -19,7 +19,6 @@ # no real test here def test_inspect_abort_info_signed(): - py.test.skip("in-progress") fo1 = ffi.new("long[]", [-2, 1, HDR, -1, 0]) # @perform_transaction @@ -32,4 +31,4 @@ else: c = lib.stm_inspect_abort_info() assert c - assert ffi.string(c) == "???" 
+ assert ffi.string(c).endswith("eli-421289712eee") From noreply at buildbot.pypy.org Sat Jul 6 18:52:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:32 +0200 (CEST) Subject: [pypy-commit] stmgc default: Strings Message-ID: <20130706165232.EDA561C3157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r359:559cb1191040 Date: 2013-07-06 15:14 +0200 http://bitbucket.org/pypy/stmgc/changeset/559cb1191040/ Log: Strings diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -224,6 +224,7 @@ offset = *fieldoffsets++; if (rps) { rps_size = stmcb_size((gcptr)rps) - offset; + assert(rps_size >= 0); res_size = sprintf(buffer, "%zu:", rps_size); WRITE_BUF(buffer, res_size); WRITE_BUF(rps + offset, rps_size); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -1,4 +1,4 @@ -import py, sys +import py, sys, struct from support import * @@ -52,3 +52,23 @@ assert c assert ffi.string(c).endswith("eli%dei%deee" % ( sys.maxint, sys.maxint * 2 + 1)) + +def test_inspect_abort_info_string(): + fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc_refs(1) + q = nalloc(HDR + 2 * WORD) + lib.setptr(p, 0, q) + lib.setlong(q, 0, -937251491) + lib.setlong(q, 1, -389541051) + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + expected = struct.pack("ll", -937251491, -389541051) + assert ffi.string(c).endswith("e%d:%se" % ( + len(expected) - 1, expected[1:])) From noreply at buildbot.pypy.org Sat Jul 6 18:52:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: NULL Message-ID: <20130706165234.1765E1C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r360:8c4673583792 Date: 2013-07-06 15:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/8c4673583792/ Log: NULL diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -72,3 +72,18 @@ expected = struct.pack("ll", -937251491, -389541051) assert ffi.string(c).endswith("e%d:%se" % ( len(expected) - 1, expected[1:])) + +def test_inspect_null(): + fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc_refs(1) + lib.setptr(p, 0, ffi.NULL) # default + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("e0:e") From noreply at buildbot.pypy.org Sat Jul 6 18:52:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Read the latest version of an object inside the aborting transaction. Message-ID: <20130706165235.1F1381C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r361:15dbb73844ab Date: 2013-07-06 15:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/15dbb73844ab/ Log: Read the latest version of an object inside the aborting transaction. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -248,6 +248,36 @@ } } +gcptr stm_RepeatReadBarrier(gcptr P) +{ + /* Version of stm_DirectReadBarrier() that doesn't abort and assumes + * that 'P' was already an up-to-date result of a previous + * stm_DirectReadBarrier(). 
We only have to check if we did in the + * meantime a stm_write_barrier(). + */ + if (P->h_tid & GCFLAG_PUBLIC) + { + if (P->h_tid & GCFLAG_NURSERY_MOVED) + { + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + } + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + { + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + no_private_obj: + ; + } + } + assert(!(P->h_tid & GCFLAG_STUB)); + return P; +} + static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, int from_stolen) { diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -181,6 +181,7 @@ void SpinLoop(int); gcptr stm_DirectReadBarrier(gcptr); +gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but not recording anything */ diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -143,6 +143,7 @@ void stm_abort_info_push(gcptr obj, long fieldoffsets[]) { struct tx_descriptor *d = thread_descriptor; + obj = stm_read_barrier(obj); gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets); } @@ -188,7 +189,7 @@ WRITE_BUF(buffer, res_size); WRITE('e'); for (i=0; iabortinfo.size; i+=2) { - char *object = (char *)d->abortinfo.items[i+0]; + char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); long *fieldoffsets = (long*)d->abortinfo.items[i+1]; long kind, offset; size_t rps_size; diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -87,3 +87,19 @@ c = lib.stm_inspect_abort_info() assert c assert ffi.string(c).endswith("e0:e") + +def test_latest_version(): + fo1 = ffi.new("long[]", [1, HDR, 0]) + p = palloc(HDR + WORD) + lib.rawsetlong(p, 0, -9827892) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + lib.stm_abort_info_push(p, fo1) + lib.setlong(p, 0, 424242) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("ei424242ee") From noreply at buildbot.pypy.org Sat Jul 6 18:52:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: Use a more ad-hoc string variant Message-ID: <20130706165236.24D041C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r362:fd4fd6cb406b Date: 2013-07-06 15:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/fd4fd6cb406b/ Log: Use a more ad-hoc string variant diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -224,7 +224,10 @@ rps = *(char **)(object + offset); offset = *fieldoffsets++; if (rps) { - rps_size = stmcb_size((gcptr)rps) - offset; + /* xxx a bit ad-hoc: it's a string whose length is a + * long at 'offset', following immediately the offset */ + rps_size = *(long *)(rps + offset); + offset += sizeof(long); assert(rps_size >= 0); res_size = sprintf(buffer, "%zu:", rps_size); WRITE_BUF(buffer, res_size); diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -54,24 +54,23 @@ sys.maxint, sys.maxint * 2 + 1)) def test_inspect_abort_info_string(): - fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0]) + fo1 = ffi.new("long[]", [3, HDR + WORD, HDR, 0]) # @perform_transaction def run(retry_counter): if retry_counter == 0: - p = nalloc_refs(1) + p = nalloc_refs(2) q = nalloc(HDR + 2 * WORD) - lib.setptr(p, 0, q) - 
lib.setlong(q, 0, -937251491) - lib.setlong(q, 1, -389541051) + lib.setptr(p, 1, q) + lib.setlong(q, 0, 3) + word = "ABC" + "\xFF" * (WORD - 3) + lib.setlong(q, 1, struct.unpack("l", word)[0]) lib.stm_abort_info_push(p, fo1) abort_and_retry() else: c = lib.stm_inspect_abort_info() assert c - expected = struct.pack("ll", -937251491, -389541051) - assert ffi.string(c).endswith("e%d:%se" % ( - len(expected) - 1, expected[1:])) + assert ffi.string(c).endswith("e3:ABCe") def test_inspect_null(): fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0]) From noreply at buildbot.pypy.org Sat Jul 6 18:52:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:37 +0200 (CEST) Subject: [pypy-commit] stmgc default: Officialize stm_abort_and_retry() Message-ID: <20130706165237.276191C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r363:007ac02eb935 Date: 2013-07-06 15:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/007ac02eb935/ Log: Officialize stm_abort_and_retry() diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -756,6 +756,11 @@ smp_spinloop(); } +void stm_abort_and_retry(void) +{ + AbortTransaction(ABRT_MANUAL); +} + void AbortPrivateFromProtected(struct tx_descriptor *d); void AbortTransaction(int num) diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -109,6 +109,7 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); +void stm_abort_and_retry(void); /**************** END OF PUBLIC INTERFACE *****************/ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -68,6 +68,7 @@ void stm_abort_info_push(gcptr obj, long fieldoffsets[]); void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); + void stm_abort_and_retry(void); /* extra non-public code */ void printfcolor(char *msg); @@ -622,7 +623,7 @@ assert fine == [True] def abort_and_retry(): - lib.AbortTransaction(lib.ABRT_MANUAL) + lib.stm_abort_and_retry() def classify(p): private_from_protected = (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0 From noreply at buildbot.pypy.org Sat Jul 6 18:52:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 18:52:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add stm_{minor, major}_collect() with the semantics needed Message-ID: <20130706165238.292951C1471@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r364:88246694721e Date: 2013-07-06 16:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/88246694721e/ Log: Add stm_{minor,major}_collect() with the semantics needed for the users. 
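The two entry points added below are thin wrappers: stm_minor_collect() empties the nursery and then lets the page manager decide whether a major collection is also due, while stm_major_collect() requests the major collection unconditionally (judging from the 0/1 argument passed to stmgcpage_possibly_major_collect() in the diff). On the RPython side they are meant to be reachable from ordinary rgc.collect() calls; the sketch below only illustrates that intended mapping. The "0 means minor, nonzero means major" correspondence is inferred from the test_collect test added later in this series, not spelled out in this changeset:

    from rpython.rlib import rgc

    def entry_point(argv):
        # assumption: generation 0 lowers to stm_minor_collect(),
        # any other value to stm_major_collect()
        rgc.collect(0)      # minor collection: flush the nursery only
        rgc.collect(1)      # full collection: minor pass plus forced major
        return 0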
diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -109,7 +109,10 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); +/* mostly for debugging support */ void stm_abort_and_retry(void); +void stm_minor_collect(void); +void stm_major_collect(void); /**************** END OF PUBLIC INTERFACE *****************/ diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -328,6 +328,18 @@ AbortNowIfDelayed(); /* if another thread ran a major GC */ } +void stm_minor_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(0); +} + +void stm_major_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(1); +} + /************************************************************/ /***** Prebuilt roots, added in the list as the transaction that changed From noreply at buildbot.pypy.org Sat Jul 6 21:37:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:34 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Progress on the remaining tests Message-ID: <20130706193734.9207C1C3164@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65236:b91b405b4dec Date: 2013-07-06 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/b91b405b4dec/ Log: Progress on the remaining tests diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,6 +1,6 @@ import py from rpython.rlib import rstm, rgc -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr from rpython.translator.stm.test.support import CompiledSTMTests @@ -120,8 +120,6 @@ def test_bug1(self): # - class Foobar: - pass def check(foobar, retry_counter): rgc.collect(0) return 0 @@ -131,7 +129,7 @@ self.count = count def g(): x = X(1000) - rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(FOOBAR)) return x def entry_point(argv): x = X(len(argv)) @@ -139,14 +137,13 @@ print '<', x.count, y.count, '>' return 0 # + perform_transaction = rstm.make_perform_transaction(check, FOOBARP) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('a b c d') assert '< 5 1000 >' in data, "got: %r" % (data,) def test_bug2(self): # - class Foobar: - pass def check(foobar, retry_counter): return 0 # do nothing # @@ -158,7 +155,7 @@ x = prebuilt2[count] x.foobar = 2 # 'x' becomes a local # - rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(FOOBAR)) # 'x' becomes the global again # y = prebuilt2[count] # same prebuilt obj @@ -170,13 +167,12 @@ print bug2(1) return 0 # + perform_transaction = rstm.make_perform_transaction(check, FOOBAR) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('') assert '12\n12\n' in data, "got: %r" % (data,) def test_prebuilt_nongc(self): - class Foobar: - pass def check(foobar, retry_counter): return 0 # do nothing from rpython.rtyper.lltypesystem import lltype @@ -187,7 +183,7 @@ # hints={'stm_thread_local': True}) #s2 = lltype.malloc(S2, immortal=True, flavor='raw') def do_stuff(): - rstm.perform_transaction(check, Foobar, Foobar()) + perform_transaction(lltype.malloc(FOOBAR)) print s1.r.x #print s2.r.x 
do_stuff._dont_inline_ = True @@ -199,6 +195,7 @@ do_stuff() return 0 # + perform_transaction = rstm.make_perform_transaction(check, FOOBAR) t, cbuilder = self.compile(main) data = cbuilder.cmdexec('') assert '42\n' in data, "got: %r" % (data,) @@ -256,3 +253,9 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'li102ee10:hi there 3e\n0\n' in data + + +FOOBAR = lltype.GcStruct('FOOBAR', + ('result_value', lltype.Void), + ('got_exception', rclass.OBJECTPTR)) +FOOBARP = lltype.Ptr(FOOBAR) From noreply at buildbot.pypy.org Sat Jul 6 21:37:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:33 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Pass again test_ztranslated:test_abort_info Message-ID: <20130706193733.588B61C3157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65235:302484c08e8c Date: 2013-07-06 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/302484c08e8c/ Log: Pass again test_ztranslated:test_abort_info diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry @@ -28,13 +28,13 @@ def abort_info_pop(count): if we_are_translated(): - stmgcintf.StmOperations.abort_info_pop(count) + llop.stm_abort_info_pop(lltype.Void, count) def charp_inspect_abort_info(): - return stmgcintf.StmOperations.inspect_abort_info() + return llop.stm_inspect_abort_info(rffi.CCHARP) def abort_and_retry(): - stmgcintf.StmOperations.abort_and_retry() + llop.stm_abort_and_retry(lltype.Void) def before_external_call(): llop.stm_commit_transaction(lltype.Void) @@ -111,6 +111,7 @@ lst.append(-1) # end of sublist continue fieldname = 'inst_' + fieldname + extraofs = None STRUCT = v_instance.concretetype.TO while not hasattr(STRUCT, fieldname): STRUCT = STRUCT.super @@ -121,12 +122,15 @@ kind = 2 elif TYPE == lltype.Ptr(rstr.STR): kind = 3 + extraofs = llmemory.offsetof(rstr.STR, 'chars') else: raise NotImplementedError( "abort_info_push(%s, %r): field of type %r" % (STRUCT.__name__, fieldname, TYPE)) lst.append(kind) lst.append(llmemory.offsetof(STRUCT, fieldname)) + if extraofs is not None: + lst.append(extraofs) lst.append(0) ARRAY = rffi.CArray(lltype.Signed) array = lltype.malloc(ARRAY, len(lst), flavor='raw', immortal=True) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -443,12 +443,17 @@ 'stm_perform_transaction':LLOp(canmallocgc=True), 'stm_enter_callback_call':LLOp(), 'stm_leave_callback_call':LLOp(), + 'stm_abort_and_retry': LLOp(), 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(), 'stm_threadlocal_get': LLOp(sideeffects=False), 'stm_threadlocal_set': LLOp(), + 'stm_abort_info_push': LLOp(), + 'stm_abort_info_pop': LLOp(), + 'stm_inspect_abort_info': LLOp(sideeffects=False), + # __________ address operations __________ 'boehm_malloc': LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -606,6 +606,10 @@ OP_STM_PERFORM_TRANSACTION = _OP_STM 
OP_STM_ENTER_CALLBACK_CALL = _OP_STM OP_STM_LEAVE_CALLBACK_CALL = _OP_STM + OP_STM_ABORT_AND_RETRY = _OP_STM + OP_STM_ABORT_INFO_PUSH = _OP_STM + OP_STM_ABORT_INFO_POP = _OP_STM + OP_STM_INSPECT_ABORT_INFO = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -159,6 +159,22 @@ arg0 = funcgen.expr(op.args[0]) return 'stm_leave_callback_call(%s);' % (arg0,) +def stm_abort_and_retry(funcgen, op): + return 'stm_abort_and_retry();' + +def stm_abort_info_push(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + return 'stm_abort_info_push((gcptr)%s, %s);' % (arg0, arg1) + +def stm_abort_info_pop(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + return 'stm_abort_info_pop(%s);' % (arg0,) + +def stm_inspect_abort_info(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_inspect_abort_info();' % (result,) + def op_stm(funcgen, op): func = globals()[op.opname] From noreply at buildbot.pypy.org Sat Jul 6 21:37:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Import stmgc/007ac02eb935 Message-ID: <20130706193732.0789D1C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65234:654fcdd1eb30 Date: 2013-07-06 15:57 +0200 http://bitbucket.org/pypy/pypy/changeset/654fcdd1eb30/ Log: Import stmgc/007ac02eb935 diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -249,6 +249,36 @@ } } +gcptr stm_RepeatReadBarrier(gcptr P) +{ + /* Version of stm_DirectReadBarrier() that doesn't abort and assumes + * that 'P' was already an up-to-date result of a previous + * stm_DirectReadBarrier(). We only have to check if we did in the + * meantime a stm_write_barrier(). 
+ */ + if (P->h_tid & GCFLAG_PUBLIC) + { + if (P->h_tid & GCFLAG_NURSERY_MOVED) + { + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + } + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + { + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + no_private_obj: + ; + } + } + assert(!(P->h_tid & GCFLAG_STUB)); + return P; +} + static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, int from_stolen) { @@ -423,29 +453,6 @@ goto restart_all; } -#if 0 -void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset) -{ - return _direct_read_barrier((gcptr)G1, (gcptr)R_Container1, offset); -} -#endif - -gcptr stm_RepeatReadBarrier(gcptr O) -{ - abort();//XXX -#if 0 - // LatestGlobalRevision(O) would either return O or abort - // the whole transaction, so omitting it is not wrong - struct tx_descriptor *d = thread_descriptor; - gcptr L; - wlog_t *entry; - G2L_FIND(d->global_to_local, O, entry, return O); - L = entry->val; - assert(L->h_revision == stm_local_revision); - return L; -#endif -} - static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; @@ -750,10 +757,10 @@ smp_spinloop(); } -#if 0 -size_t _stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, - int abort_reason, char *output); -#endif +void stm_abort_and_retry(void) +{ + AbortTransaction(ABRT_MANUAL); +} void AbortPrivateFromProtected(struct tx_descriptor *d); @@ -796,41 +803,24 @@ elapsed_time = 1; } -#if 0 - size_t size; if (elapsed_time >= d->longest_abort_info_time) { /* decode the 'abortinfo' and produce a human-readable summary in the string 'longest_abort_info' */ - size = _stm_decode_abort_info(d, elapsed_time, num, NULL); + size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL); free(d->longest_abort_info); d->longest_abort_info = malloc(size); if (d->longest_abort_info == NULL) d->longest_abort_info_time = 0; /* out of memory! */ else { - if (_stm_decode_abort_info(d, elapsed_time, + if (stm_decode_abort_info(d, elapsed_time, num, d->longest_abort_info) != size) stm_fatalerror("during stm abort: object mutated unexpectedly\n"); d->longest_abort_info_time = elapsed_time; } } -#endif - -#if 0 - /* run the undo log in reverse order, cancelling the values set by - stm_ThreadLocalRef_LLSet(). */ - if (d->undolog.size > 0) { - gcptr *item = d->undolog.items; - long i; - for (i=d->undolog.size; i>=0; i-=2) { - void **addr = (void **)(item[i-2]); - void *oldvalue = (void *)(item[i-1]); - *addr = oldvalue; - } - } -#endif /* upon abort, set the reads size limit to 94% of how much was read so far. 
This should ensure that, assuming the retry does the same @@ -937,10 +927,7 @@ d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); -#if 0 - gcptrlist_clear(&d->undolog); gcptrlist_clear(&d->abortinfo); -#endif } void BeginTransaction(jmp_buf* buf) @@ -1497,17 +1484,6 @@ /************************************************************/ -#if 0 -void stm_ThreadLocalRef_LLSet(void **addr, void *newvalue) -{ - struct tx_descriptor *d = thread_descriptor; - gcptrlist_insert2(&d->undolog, (gcptr)addr, (gcptr)*addr); - *addr = newvalue; -} -#endif - -/************************************************************/ - struct tx_descriptor *stm_tx_head = NULL; struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; static revision_t descriptor_array_free_list = 0; @@ -1636,11 +1612,8 @@ assert(d->private_from_protected.size == 0); gcptrlist_delete(&d->private_from_protected); gcptrlist_delete(&d->list_of_read_objects); -#if 0 gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); - gcptrlist_delete(&d->undolog); -#endif int num_aborts = 0, num_spinloops = 0; char line[256], *p = line; diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -153,9 +153,9 @@ unsigned int num_aborts[ABORT_REASONS]; unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; - //struct GcPtrList abortinfo; struct GcPtrList private_from_protected; struct G2L public_to_private; + struct GcPtrList abortinfo; char *longest_abort_info; long long longest_abort_info_time; revision_t *private_revision_ref; diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/extra.c @@ -0,0 +1,260 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#include "stmimpl.h" + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id) +{ + //assert(!is_in_nursery(thread_descriptor, id)); + assert(id->h_tid & GCFLAG_OLD); + + size_t size = stmgc_size(obj); + memcpy(id, obj, size); + id->h_tid &= ~GCFLAG_HAS_ID; + id->h_tid |= GCFLAG_OLD; + dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); +} + +/************************************************************/ +/* Each object has a h_original pointer to an old copy of + the same object (e.g. an old revision), the "original". + The memory location of this old object is used as the ID + for this object. If h_original is NULL *and* it is an + old object copy, it itself is the original. This invariant + must be upheld by all code dealing with h_original. + The original copy must never be moved again. Also, it may + be just a stub-object. + + If we want the ID of an object which is still young, + we must preallocate an old shadow-original that is used + as the target of the young object in a minor collection. + In this case, we set the HAS_ID flag on the young obj + to notify minor_collect. + This flag can be lost if the young obj is stolen. Then + the stealing thread uses the shadow-original itself and + minor_collect must not overwrite it again. + Also, if there is already a backup-copy around, we use + this instead of allocating another old object to use as + the shadow-original. + */ + +static revision_t mangle_hash(revision_t n) +{ + /* To hash pointers in dictionaries. Assumes that i shows some + alignment (to 4, 8, maybe 16 bytes), so we use the following + formula to avoid the trailing bits being always 0. 
+ This formula is reversible: two different values of 'i' will + always give two different results. + */ + return n ^ (((urevision_t)n) >> 4); +} + + +revision_t stm_hash(gcptr p) +{ + /* Prebuilt objects may have a specific hash stored in an extra + field. For now, we will simply always follow h_original and + see, if it is a prebuilt object (XXX: maybe propagate a flag + to all copies of a prebuilt to avoid this cache miss). + */ + if (p->h_original) { + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + return p->h_original; + } + gcptr orig = (gcptr)p->h_original; + if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { + return orig->h_original; + } + } + return mangle_hash(stm_id(p)); +} + + +revision_t stm_id(gcptr p) +{ + struct tx_descriptor *d = thread_descriptor; + revision_t result; + + if (p->h_original) { /* fast path */ + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + /* h_original may contain a specific hash value, + but in case of the prebuilt original version, + its memory location is the id */ + return (revision_t)p; + } + + dprintf(("stm_id(%p) has orig fst: %p\n", + p, (gcptr)p->h_original)); + return p->h_original; + } + else if (p->h_tid & GCFLAG_OLD) { + /* old objects must have an h_original xOR be + the original itself. */ + dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); + return (revision_t)p; + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'I'); + /* old objects must have an h_original xOR be + the original itself. + if some thread stole p when it was still young, + it must have set h_original. stealing an old obj + makes the old obj "original". + */ + if (p->h_original) { /* maybe now? */ + result = p->h_original; + dprintf(("stm_id(%p) has orig: %p\n", + p, (gcptr)p->h_original)); + } + else { + /* must create shadow original object XXX: or use + backup, if exists */ + + /* XXX use stmgcpage_malloc() directly, we don't need to copy + * the contents yet */ + gcptr O = stmgc_duplicate_old(p); + p->h_original = (revision_t)O; + p->h_tid |= GCFLAG_HAS_ID; + + if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + gcptr B = (gcptr)p->h_revision; + B->h_original = (revision_t)O; + } + + result = (revision_t)O; + dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); + } + + spinlock_release(d->public_descriptor->collection_lock); + return result; +} + +_Bool stm_pointer_equal(gcptr p1, gcptr p2) +{ + /* fast path for two equal pointers */ + if (p1 == p2) + return 1; + /* types must be the same */ + if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) + return 0; + return stm_id(p1) == stm_id(p2); +} + +/************************************************************/ + +void stm_abort_info_push(gcptr obj, long fieldoffsets[]) +{ + struct tx_descriptor *d = thread_descriptor; + obj = stm_read_barrier(obj); + gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets); +} + +void stm_abort_info_pop(long count) +{ + struct tx_descriptor *d = thread_descriptor; + long newsize = d->abortinfo.size - 2 * count; + gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize); +} + +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output) +{ + /* re-encodes the abort info as a single string. + For convenience (no escaping needed, no limit on integer + sizes, etc.) we follow the bittorrent format. 
*/ + size_t totalsize = 0; + long i; + char buffer[32]; + size_t res_size; +#define WRITE(c) { totalsize++; if (output) *output++=(c); } +#define WRITE_BUF(p, sz) { totalsize += (sz); \ + if (output) { \ + memcpy(output, (p), (sz)); output += (sz); \ + } \ + } + WRITE('l'); + WRITE('l'); + res_size = sprintf(buffer, "i%llde", (long long)elapsed_time); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)abort_reason); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->atomic); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)d->active); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", + (unsigned long)d->reads_size_limit_nonatomic); + WRITE_BUF(buffer, res_size); + WRITE('e'); + for (i=0; iabortinfo.size; i+=2) { + char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + long *fieldoffsets = (long*)d->abortinfo.items[i+1]; + long kind, offset; + size_t rps_size; + char *rps; + + while (1) { + kind = *fieldoffsets++; + if (kind <= 0) { + if (kind == -2) { + WRITE('l'); /* '[', start of sublist */ + continue; + } + if (kind == -1) { + WRITE('e'); /* ']', end of sublist */ + continue; + } + break; /* 0, terminator */ + } + offset = *fieldoffsets++; + switch(kind) { + case 1: /* signed */ + res_size = sprintf(buffer, "i%lde", + *(long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 2: /* unsigned */ + res_size = sprintf(buffer, "i%lue", + *(unsigned long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 3: /* a string of bytes from the target object */ + rps = *(char **)(object + offset); + offset = *fieldoffsets++; + if (rps) { + /* xxx a bit ad-hoc: it's a string whose length is a + * long at 'offset', following immediately the offset */ + rps_size = *(long *)(rps + offset); + offset += sizeof(long); + assert(rps_size >= 0); + res_size = sprintf(buffer, "%zu:", rps_size); + WRITE_BUF(buffer, res_size); + WRITE_BUF(rps + offset, rps_size); + } + else { + WRITE_BUF("0:", 2); + } + break; + default: + stm_fatalerror("corrupted abort log\n"); + } + } + } + WRITE('e'); + WRITE('\0'); /* final null character */ +#undef WRITE +#undef WRITE_BUF + return totalsize; +} + +char *stm_inspect_abort_info(void) +{ + struct tx_descriptor *d = thread_descriptor; + if (d->longest_abort_info_time <= 0) + return NULL; + d->longest_abort_info_time = 0; + return d->longest_abort_info; +} diff --git a/rpython/translator/stm/src_stm/extra.h b/rpython/translator/stm/src_stm/extra.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/extra.h @@ -0,0 +1,10 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _SRCSTM_EXTRA_H +#define _SRCSTM_EXTRA_H + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id); +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output); + +#endif diff --git a/rpython/translator/stm/src_stm/fprintcolor.c b/rpython/translator/stm/src_stm/fprintcolor.c --- a/rpython/translator/stm/src_stm/fprintcolor.c +++ b/rpython/translator/stm/src_stm/fprintcolor.c @@ -6,7 +6,7 @@ { va_list ap; -#ifdef _GC_DEBUG +#ifdef _GC_DEBUGPRINTS dprintf(("STM Subsystem: Fatal Error\n")); #else fprintf(stderr, "STM Subsystem: Fatal Error\n"); @@ -20,7 +20,7 @@ } -#ifdef _GC_DEBUG 
+#ifdef _GC_DEBUGPRINTS static __thread revision_t tcolor = 0; static revision_t tnextid = 0; diff --git a/rpython/translator/stm/src_stm/fprintcolor.h b/rpython/translator/stm/src_stm/fprintcolor.h --- a/rpython/translator/stm/src_stm/fprintcolor.h +++ b/rpython/translator/stm/src_stm/fprintcolor.h @@ -7,7 +7,7 @@ __attribute__((format (printf, 1, 2), noreturn)); -#ifdef _GC_DEBUG +#ifdef _GC_DEBUGPRINTS #define dprintf(args) threadcolor_printf args int dprintfcolor(void); diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -213,6 +213,30 @@ static struct GcPtrList objects_to_trace; +static void keep_original_alive(gcptr obj) +{ + /* keep alive the original of a visited object */ + gcptr id_copy = (gcptr)obj->h_original; + /* prebuilt original objects may have a predifined + hash in h_original */ + if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; + /* see fix_outdated() */ + id_copy->h_tid |= GCFLAG_VISITED; + + /* XXX: may not always need tracing? */ + //if (!(id_copy->h_tid & GCFLAG_STUB)) + // gcptrlist_insert(&objects_to_trace, id_copy); + } + else { + /* prebuilt originals won't get collected anyway + and if they are not reachable in any other way, + we only ever need their location, not their content */ + } + } +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -227,6 +251,8 @@ obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; gcptrlist_insert(&objects_to_trace, obj); + + keep_original_alive(obj); } } else if (obj->h_tid & GCFLAG_PUBLIC) { @@ -247,6 +273,8 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; + keep_original_alive(prev_obj); + assert(*pobj == prev_obj); gcptr obj1 = obj; visit(&obj1); /* recursion, but should be only once */ @@ -257,6 +285,9 @@ } if (!(obj->h_revision & 3)) { + /* obj is neither a stub nor a most recent revision: + completely ignore obj->h_revision */ + obj = (gcptr)obj->h_revision; assert(obj->h_tid & GCFLAG_PUBLIC); prev_obj->h_revision = (revision_t)obj; @@ -275,7 +306,14 @@ assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); gcptr B = (gcptr)obj->h_revision; assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - + + if (obj->h_original && (gcptr)obj->h_original != B) { + /* if B is original, it will be visited anyway */ + assert(obj->h_original == B->h_original); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + keep_original_alive(obj); + } + obj->h_tid |= GCFLAG_VISITED; B->h_tid |= GCFLAG_VISITED; assert(!(obj->h_tid & GCFLAG_STUB)); @@ -294,6 +332,7 @@ } } + static void visit_keep(gcptr obj) { if (!(obj->h_tid & GCFLAG_VISITED)) { @@ -305,6 +344,7 @@ assert(!(obj->h_revision & 2)); visit((gcptr *)&obj->h_revision); } + keep_original_alive(obj); } } @@ -376,8 +416,24 @@ outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; + + /* Objects that were not visited yet must have the PUB_TO_PRIV + flag. Except if that transaction will abort anyway, then it + may be removed from a previous major collection that didn't + fix the PUB_TO_PRIV because the transaction was going to + abort anyway: + 1. minor_collect before major collect (R->L, R is outdated, abort) + 2. major collect removes flag + 3. major collect again, same thread, no time to abort + 4. 
flag still removed + */ + assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, + R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); visit_keep(R); if (L != NULL) { + /* minor collection found R->L in public_to_young + and R was modified. It then sets item->val to NULL and wants + to abort later. */ revision_t v = L->h_revision; visit_keep(L); /* a bit of custom logic here: if L->h_revision used to @@ -385,8 +441,10 @@ keep this property, even though visit_keep(L) might decide it would be better to make it point to a more recent copy. */ - if (v == (revision_t)R) + if (v == (revision_t)R) { + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); L->h_revision = v; /* restore */ + } } } G2L_LOOP_END; @@ -449,6 +507,7 @@ just removing it is very wrong --- we want 'd' to abort. */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; } @@ -483,14 +542,16 @@ /* We are now after visiting all objects, and we know the * transaction isn't aborting because of this collection. We have * cleared GCFLAG_PUBLIC_TO_PRIVATE from public objects at the end - * of the chain. Now we have to set it again on public objects that - * have a private copy. + * of the chain (head revisions). Now we have to set it again on + * public objects that have a private copy. */ wlog_t *item; dprintf(("fix public_to_private on thread %p\n", d)); G2L_LOOP_FORWARD(d->public_to_private, item) { + assert(item->addr->h_tid & GCFLAG_VISITED); + assert(item->val->h_tid & GCFLAG_VISITED); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -45,7 +45,12 @@ void stmgc_done_nursery(void) { struct tx_descriptor *d = thread_descriptor; - assert(!minor_collect_anything_to_do(d)); + /* someone may have called minor_collect_soon() + inbetween the preceeding minor_collect() and + this assert (committransaction() -> + updatechainheads() -> stub_malloc() -> ...): */ + assert(!minor_collect_anything_to_do(d) + || d->nursery_current == d->nursery_end); stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); @@ -121,131 +126,6 @@ } /************************************************************/ -/* Each object has a h_original pointer to an old copy of - the same object (e.g. an old revision), the "original". - The memory location of this old object is used as the ID - for this object. If h_original is NULL *and* it is an - old object copy, it itself is the original. This invariant - must be upheld by all code dealing with h_original. - The original copy must never be moved again. Also, it may - be just a stub-object. - - If we want the ID of an object which is still young, - we must preallocate an old shadow-original that is used - as the target of the young object in a minor collection. - In this case, we set the HAS_ID flag on the young obj - to notify minor_collect. - This flag can be lost if the young obj is stolen. Then - the stealing thread uses the shadow-original itself and - minor_collect must not overwrite it again. - Also, if there is already a backup-copy around, we use - this instead of allocating another old object to use as - the shadow-original. - */ - -static revision_t mangle_hash(revision_t n) -{ - /* To hash pointers in dictionaries. 
Assumes that i shows some - alignment (to 4, 8, maybe 16 bytes), so we use the following - formula to avoid the trailing bits being always 0. - This formula is reversible: two different values of 'i' will - always give two different results. - */ - return n ^ (((urevision_t)n) >> 4); -} - - -revision_t stm_hash(gcptr p) -{ - /* Prebuilt objects may have a specific hash stored in an extra - field. For now, we will simply always follow h_original and - see, if it is a prebuilt object (XXX: maybe propagate a flag - to all copies of a prebuilt to avoid this cache miss). - */ - if (p->h_original) { - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - return p->h_original; - } - gcptr orig = (gcptr)p->h_original; - if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { - return orig->h_original; - } - } - return mangle_hash(stm_id(p)); -} - - -revision_t stm_id(gcptr p) -{ - struct tx_descriptor *d = thread_descriptor; - revision_t result; - - if (p->h_original) { /* fast path */ - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - /* h_original may contain a specific hash value, - but in case of the prebuilt original version, - its memory location is the id */ - return (revision_t)p; - } - - dprintf(("stm_id(%p) has orig fst: %p\n", - p, (gcptr)p->h_original)); - return p->h_original; - } - else if (p->h_tid & GCFLAG_OLD) { - /* old objects must have an h_original xOR be - the original itself. */ - dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); - return (revision_t)p; - } - - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); - /* old objects must have an h_original xOR be - the original itself. - if some thread stole p when it was still young, - it must have set h_original. stealing an old obj - makes the old obj "original". - */ - if (p->h_original) { /* maybe now? 
*/ - result = p->h_original; - dprintf(("stm_id(%p) has orig: %p\n", - p, (gcptr)p->h_original)); - } - else { - /* must create shadow original object XXX: or use - backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); - p->h_original = (revision_t)O; - p->h_tid |= GCFLAG_HAS_ID; - - if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - gcptr B = (gcptr)p->h_revision; - B->h_original = (revision_t)O; - } - - result = (revision_t)O; - dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); - } - - spinlock_release(d->public_descriptor->collection_lock); - return result; -} - -_Bool stm_pointer_equal(gcptr p1, gcptr p2) -{ - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); -} - -/************************************************************/ static inline gcptr create_old_object_copy(gcptr obj) { @@ -262,18 +142,6 @@ return fresh_old_copy; } -inline void copy_to_old_id_copy(gcptr obj, gcptr id) -{ - assert(!is_in_nursery(thread_descriptor, id)); - assert(id->h_tid & GCFLAG_OLD); - - size_t size = stmgc_size(obj); - memcpy(id, obj, size); - id->h_tid &= ~GCFLAG_HAS_ID; - id->h_tid |= GCFLAG_OLD; - dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); -} - static void visit_if_young(gcptr *root) { gcptr obj = *root; @@ -299,7 +167,7 @@ /* already has a place to go to */ gcptr id_obj = (gcptr)obj->h_original; - copy_to_old_id_copy(obj, id_obj); + stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; obj->h_tid &= ~GCFLAG_HAS_ID; } @@ -315,6 +183,7 @@ *root = fresh_old_copy; /* add 'fresh_old_copy' to the list of objects to trace */ + assert(!(fresh_old_copy->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, fresh_old_copy); } } @@ -426,6 +295,7 @@ gcptr P = items[i]; assert(P->h_tid & GCFLAG_PUBLIC); assert(P->h_tid & GCFLAG_OLD); + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; @@ -474,7 +344,18 @@ assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - obj->h_tid |= GCFLAG_WRITE_BARRIER; + + /* We add the WRITE_BARRIER flag to objects here, but warning: + we may occasionally see a PUBLIC object --- one that was + a private/protected object when it was added to + old_objects_to_trace, and has been stolen. So we have to + check and not do any change to the obj->h_tid in that case. + Otherwise this conflicts with the rule that we may only + modify obj->h_tid of a public object in order to add + PUBLIC_TO_PRIVATE. 
+ */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) + obj->h_tid |= GCFLAG_WRITE_BARRIER; stmgc_trace(obj, &visit_if_young); } @@ -672,6 +553,7 @@ gcptr P = stmgcpage_malloc(allocate_size); memset(P, 0, allocate_size); P->h_tid = tid | GCFLAG_OLD; + assert(!(P->h_tid & GCFLAG_PUBLIC)); gcptrlist_insert(&d->old_objects_to_trace, P); return P; } diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -637f6c9d19f7 +007ac02eb935 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -2,8 +2,6 @@ #include "stmimpl.h" -inline void copy_to_old_id_copy(gcptr obj, gcptr id); - gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { assert(pd->collection_lock != 0); @@ -168,7 +166,7 @@ /* use id-copy for us */ O = (gcptr)L->h_original; L->h_tid &= ~GCFLAG_HAS_ID; - copy_to_old_id_copy(L, O); + stm_copy_to_old_id_copy(L, O); O->h_original = 0; } else { /* Copy the object out of the other thread's nursery, @@ -254,6 +252,7 @@ for (i = 0; i < size; i += 2) { gcptr B = items[i]; assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ + assert(B->h_tid & GCFLAG_PUBLIC); /* to be on the safe side --- but actually needed, see the gcptrlist_insert2(L, NULL) above */ @@ -265,6 +264,7 @@ assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(L->h_revision)); + assert(B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); g2l_insert(&d->public_to_private, B, L); /* this is definitely needed: all keys in public_to_private diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -10,5 +10,6 @@ #include "nursery.c" #include "gcpage.c" #include "stmsync.c" +#include "extra.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -102,6 +102,19 @@ It is set to NULL by stm_initialize(). */ extern __thread gcptr stm_thread_local_obj; +/* For tracking where aborts occurs, you can push/pop information + into this stack. When an abort occurs this information is encoded + and flattened into a buffer which can later be retrieved with + stm_inspect_abort_info(). 
(XXX details not documented yet) */ +void stm_abort_info_push(gcptr obj, long fieldoffsets[]); +void stm_abort_info_pop(long count); +char *stm_inspect_abort_info(void); + +void stm_abort_and_retry(void); + + +/**************** END OF PUBLIC INTERFACE *****************/ +/************************************************************/ /* macro-like functionality */ diff --git a/rpython/translator/stm/src_stm/stmimpl.h b/rpython/translator/stm/src_stm/stmimpl.h --- a/rpython/translator/stm/src_stm/stmimpl.h +++ b/rpython/translator/stm/src_stm/stmimpl.h @@ -13,7 +13,7 @@ # endif #endif -#ifdef _GC_DEBUG +#if defined(_GC_DEBUG) && !defined(DUMP_EXTRA) # if _GC_DEBUG >= 2 # define DUMP_EXTRA # endif @@ -36,5 +36,6 @@ #include "et.h" #include "steal.h" #include "stmsync.h" +#include "extra.h" #endif diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -81,6 +81,7 @@ int stm_enter_callback_call(void) { int token = (thread_descriptor == NULL); + dprintf(("enter_callback_call(tok=%d)\n", token)); if (token == 1) { stmgcpage_acquire_global_lock(); DescriptorInit(); @@ -94,6 +95,7 @@ void stm_leave_callback_call(int token) { + dprintf(("leave_callback_call(%d)\n", token)); if (token == 1) stmgc_minor_collect(); /* force everything out of the nursery */ From noreply at buildbot.pypy.org Sat Jul 6 21:37:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Import stmgc/88246694721e Message-ID: <20130706193735.E47EE1C3309@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65237:bb66659492be Date: 2013-07-06 16:20 +0200 http://bitbucket.org/pypy/pypy/changeset/bb66659492be/ Log: Import stmgc/88246694721e diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -007ac02eb935 +88246694721e diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -110,7 +110,10 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); +/* mostly for debugging support */ void stm_abort_and_retry(void); +void stm_minor_collect(void); +void stm_major_collect(void); /**************** END OF PUBLIC INTERFACE *****************/ diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -329,6 +329,18 @@ AbortNowIfDelayed(); /* if another thread ran a major GC */ } +void stm_minor_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(0); +} + +void stm_major_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(1); +} + /************************************************************/ /***** Prebuilt roots, added in the list as the transaction that changed From noreply at buildbot.pypy.org Sat Jul 6 21:37:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: test_collect. 
Message-ID: <20130706193737.2CF691C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65238:4be9b6ab9ba7 Date: 2013-07-06 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4be9b6ab9ba7/ Log: test_collect. diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -610,6 +610,8 @@ OP_STM_ABORT_INFO_PUSH = _OP_STM OP_STM_ABORT_INFO_POP = _OP_STM OP_STM_INSPECT_ABORT_INFO = _OP_STM + OP_STM_MINOR_COLLECT = _OP_STM + OP_STM_MAJOR_COLLECT = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -175,6 +175,12 @@ result = funcgen.expr(op.result) return '%s = stm_inspect_abort_info();' % (result,) +def stm_minor_collect(funcgen, op): + return 'stm_minor_collect();' + +def stm_major_collect(funcgen, op): + return 'stm_major_collect();' + def op_stm(funcgen, op): func = globals()[op.opname] diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -113,6 +113,15 @@ cbuilder.cmdexec('') # assert did not crash + def test_collect(self): + def entry_point(argv): + rgc.collect(int(argv[1])) + return 0 + t, cbuilder = self.compile(entry_point) + cbuilder.cmdexec('0') + cbuilder.cmdexec('1') + # assert did not crash + def test_targetdemo(self): t, cbuilder = self.compile(targetdemo2.entry_point) data, dataerr = cbuilder.cmdexec('4 5000', err=True) From noreply at buildbot.pypy.org Sat Jul 6 21:37:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:38 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Fix the test. Now test_ztranslated passes completely. Message-ID: <20130706193738.7047D1C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65239:a7a33a68fd93 Date: 2013-07-06 16:23 +0200 http://bitbucket.org/pypy/pypy/changeset/a7a33a68fd93/ Log: Fix the test. Now test_ztranslated passes completely. 
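The one-line fixes below are about which of the two helper types goes where: rstm.make_perform_transaction() expects the pointer type FOOBARP, while the runtime call allocates the FOOBAR GcStruct itself. Condensed from the test changes earlier in this series (same names and signatures as in those diffs, nothing new), the intended pattern is:

    from rpython.rlib import rstm
    from rpython.rtyper.lltypesystem import lltype, rclass

    FOOBAR = lltype.GcStruct('FOOBAR',
                             ('result_value', lltype.Void),
                             ('got_exception', rclass.OBJECTPTR))
    FOOBARP = lltype.Ptr(FOOBAR)

    def check(foobar, retry_counter):
        return 0    # do nothing, as in the tests

    # built once, with the *pointer* type ...
    perform_transaction = rstm.make_perform_transaction(check, FOOBARP)
    # ... and later invoked with a freshly allocated FOOBAR struct
    perform_transaction(lltype.malloc(FOOBAR))

Passing FOOBAR (the struct) where FOOBARP (the pointer) is expected is exactly what the two hunks below correct.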
diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -176,7 +176,7 @@ print bug2(1) return 0 # - perform_transaction = rstm.make_perform_transaction(check, FOOBAR) + perform_transaction = rstm.make_perform_transaction(check, FOOBARP) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('') assert '12\n12\n' in data, "got: %r" % (data,) @@ -204,7 +204,7 @@ do_stuff() return 0 # - perform_transaction = rstm.make_perform_transaction(check, FOOBAR) + perform_transaction = rstm.make_perform_transaction(check, FOOBARP) t, cbuilder = self.compile(main) data = cbuilder.cmdexec('') assert '42\n' in data, "got: %r" % (data,) From noreply at buildbot.pypy.org Sat Jul 6 21:37:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:37:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: hg merge Message-ID: <20130706193740.3FFC41C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65240:cee8eec5ca90 Date: 2013-07-06 21:36 +0200 http://bitbucket.org/pypy/pypy/changeset/cee8eec5ca90/ Log: hg merge diff --git a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -5,7 +5,7 @@ from pypy.module.thread.threadlocals import BaseThreadLocals from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext -from pypy.interpreter.gateway import Wrappable, W_Root, interp2app +from pypy.interpreter.gateway import W_Root, interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty, descr_get_dict from rpython.rlib import rthread from rpython.rlib import rstm @@ -122,7 +122,7 @@ # ____________________________________________________________ -class STMLocal(Wrappable): +class STMLocal(W_Root): """Thread-local data""" @jit.dont_look_inside diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -92,7 +92,7 @@ self._build_wb_slowpath(False, withfloats=True) self._build_wb_slowpath(True, withfloats=True) self._build_propagate_exception_path() - if gc_ll_descr.get_malloc_slowpath_addr is not None: + if gc_ll_descr.get_malloc_slowpath_addr() is not None: # generate few slowpaths for various cases self.malloc_slowpath = self._build_malloc_slowpath(kind='fixed') self.malloc_slowpath_varsize = self._build_malloc_slowpath( diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -241,6 +241,9 @@ return self.malloc_array(arraydescr.basesize, num_elem, arraydescr.itemsize, arraydescr.lendescr.offset) + + def get_malloc_slowpath_addr(self): + return None # ____________________________________________________________ # All code below is for the hybrid or minimark GC @@ -267,37 +270,24 @@ rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) return rffi.cast(lltype.Signed, rst_addr) + class WriteBarrierDescr(AbstractDescr): - def __init__(self, gc_ll_descr, stmcat=None): + def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 - self.stmcat = stmcat - self.returns_modified_object = (stmcat is not None) - if not self.returns_modified_object: - self.WB_FUNCPTR = 
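Besides the Wrappable-to-W_Root and assembler updates, the merge below pulls in the split of the JIT write-barrier descriptor into STM read and write variants: P2Rdescr backed by stm_read_barrier, and P2Wdescr / R2Wdescr backed by stm_write_barrier. The rewriter now emits cond_call_stm_rb or cond_call_stm_wb and remembers which category a box has already reached, so a read followed by a write gets the R2W variant instead of the generic P2W one. The standalone sketch below models just that category logic; its behaviour is matched against the expectations in test_stmrewrite.py further down, but it is an illustration, not the code in stmrewrite.py:

    def barrier_needed(known, target):
        """Which barrier moves an object from category 'known'
        ('P' = plain, unknown pointer; 'R' = readable; 'W' = writable)
        to category 'target' ('R' or 'W')?  None means no barrier."""
        if target == 'R':
            if known in ('R', 'W'):
                return None                             # already readable
            return ('cond_call_stm_rb', 'P2Rdescr')
        assert target == 'W'
        if known == 'W':
            return None                                 # already writable
        if known == 'R':
            return ('cond_call_stm_wb', 'R2Wdescr')     # read, then written
        return ('cond_call_stm_wb', 'P2Wdescr')

    # the same transitions the rewritten traces below are checked for:
    assert barrier_needed('P', 'R') == ('cond_call_stm_rb', 'P2Rdescr')
    assert barrier_needed('P', 'W') == ('cond_call_stm_wb', 'P2Wdescr')
    assert barrier_needed('R', 'W') == ('cond_call_stm_wb', 'R2Wdescr')
    assert barrier_needed('W', 'R') is None
    assert barrier_needed('W', 'W') is None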
lltype.Ptr(lltype.FuncType( - [llmemory.Address], lltype.Void)) - else: - self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( - [llmemory.Address], llmemory.Address)) + + self.returns_modified_object = False + self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address], lltype.Void)) + self.fielddescr_tid = gc_ll_descr.fielddescr_tid self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR # - if self.stmcat is not None: - cfunc_name = self.stmcat[2] - self.wb_failing_case_ptr = rffi.llexternal( - cfunc_name, - self.WB_FUNCPTR_MOD.TO.ARGS, - self.WB_FUNCPTR_MOD.TO.RESULT, - sandboxsafe=True, - _nowrapper=True) - # GCClass = gc_ll_descr.GCClass if GCClass is None: # for tests return - if self.stmcat is None: - self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG - else: - self.jit_wb_if_flag = self.stmcat[0] + + self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) # @@ -317,11 +307,7 @@ self.wb_slowpath = [0, 0, 0, 0] def repr_of_descr(self): - if self.stmcat is None: - return 'wbdescr' - else: - cat = self.stmcat[1] - return cat + return 'wbdescr' def __repr__(self): return '' % (self.repr_of_descr(),) @@ -376,7 +362,7 @@ self.wb_slowpath[withcards + 2 * withfloats] = addr @specialize.arg(2) - def _do_write_barrier(self, gcref_struct, returns_modified_object): + def _do_barrier(self, gcref_struct, returns_modified_object): assert self.returns_modified_object == returns_modified_object hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct) hdr_addr -= self.gcheaderbuilder.size_gc_header @@ -392,7 +378,50 @@ if returns_modified_object: return gcref_struct +class STMBarrierDescr(WriteBarrierDescr): + def __init__(self, gc_ll_descr, stmcat, cfunc_name): + WriteBarrierDescr.__init__(self, gc_ll_descr) + self.stmcat = stmcat + self.returns_modified_object = True + self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( + [llmemory.Address], llmemory.Address)) + self.wb_failing_case_ptr = rffi.llexternal( + cfunc_name, + self.WB_FUNCPTR_MOD.TO.ARGS, + self.WB_FUNCPTR_MOD.TO.RESULT, + sandboxsafe=True, + _nowrapper=True) + + def repr_of_descr(self): + cat = self.stmcat + return cat + + @specialize.arg(2) + def _do_barrier(self, gcref_struct, returns_modified_object): + assert self.returns_modified_object == returns_modified_object + # XXX: fastpath for Read and Write variants + funcptr = self.get_barrier_funcptr(returns_modified_object) + res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) + if returns_modified_object: + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) + + +class STMReadBarrierDescr(STMBarrierDescr): + def __init__(self, gc_ll_descr, stmcat): + assert stmcat == 'P2R' + STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, + 'stm_read_barrier') + + + +class STMWriteBarrierDescr(STMBarrierDescr): + def __init__(self, gc_ll_descr, stmcat): + assert stmcat in ['P2W', 'R2W'] + STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, + 'stm_write_barrier') + + class GcLLDescr_framework(GcLLDescription): DEBUG = False # forced to True by x86/test/test_zrpy_gc.py kind = 'framework' @@ -487,18 +516,13 @@ else: self.write_barrier_descr = WriteBarrierDescr(self) def do_write_barrier(gcref_struct, gcref_newptr): - self.write_barrier_descr._do_write_barrier(gcref_struct, False) + self.write_barrier_descr._do_barrier(gcref_struct, False) self.do_write_barrier = do_write_barrier def _setup_barriers_for_stm(self): - from rpython.memory.gc import stmgc - WBDescr = WriteBarrierDescr - 
self.P2Rdescr = WBDescr(self, (stmgc.GCFLAG_GLOBAL, 'P2R', - 'stm_DirectReadBarrier')) - self.P2Wdescr = WBDescr(self, (stmgc.GCFLAG_NOT_WRITTEN, 'P2W', - 'stm_WriteBarrier')) - self.R2Wdescr = WBDescr(self, (stmgc.GCFLAG_NOT_WRITTEN, 'R2W', - 'stm_WriteBarrierFromReady')) + self.P2Rdescr = STMReadBarrierDescr(self, 'P2R') + self.P2Wdescr = STMWriteBarrierDescr(self, 'P2W') + self.R2Wdescr = STMWriteBarrierDescr(self, 'R2W') self.write_barrier_descr = "wbdescr: do not use" # @specialize.argtype(0) @@ -509,7 +533,7 @@ descr = self.P2Wdescr else: descr = self.P2Rdescr - return descr._do_write_barrier(gcref, True) + return descr._do_barrier(gcref, True) self.do_stm_barrier = do_stm_barrier def _make_functions(self, really_not_translated): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -121,8 +121,13 @@ except KeyError: return v_base # no barrier needed args = [v_base, self.c_zero] - self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None, + if target_category == 'W': + op = rop.COND_CALL_STM_WB + else: + op = rop.COND_CALL_STM_RB + self.newops.append(ResOperation(op, args, None, descr=write_barrier_descr)) + self.known_category[v_base] = target_category return v_base diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -213,7 +213,7 @@ gc_ll_descr = self.gc_ll_descr llop1 = self.llop1 # - rewriter = gc.GcRewriterAssembler(gc_ll_descr, None) + rewriter = GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() v_value = BoxPtr() diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -35,6 +35,7 @@ tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + tydescr = get_field_descr(self.gc_ll_descr, T, 'y') # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1,8 +1,9 @@ from rpython.jit.backend.llsupport.descr import * from rpython.jit.backend.llsupport.gc import * from rpython.jit.metainterp.gc import get_description -from rpython.jit.backend.llsupport.test.test_rewrite import RewriteTests - +from rpython.jit.backend.llsupport.test.test_rewrite import ( + RewriteTests, BaseFakeCPU) +from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory class TestStm(RewriteTests): def setup_method(self, meth): @@ -17,11 +18,12 @@ self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) # - class FakeCPU(object): + class FakeCPU(BaseFakeCPU): def sizeof(self, STRUCT): descr = SizeDescrWithVTable(104) descr.tid = 9315 return descr + self.cpu = FakeCPU() def check_rewrite(self, frm_operations, to_operations, **namespace): @@ -42,12 +44,14 @@ jump() """, """ [p1, p2] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) jump() """) def 
test_rewrite_setfield_gc_const(self): + TP = lltype.GcArray(lltype.Signed) + NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) self.check_rewrite(""" [p1, p2] setfield_gc(ConstPtr(t), p2, descr=tzdescr) @@ -55,10 +59,10 @@ """, """ [p1, p2] p3 = same_as(ConstPtr(t)) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) setfield_gc(p3, p2, descr=tzdescr) jump() - """) + """, t=NULL) def test_rewrite_setfield_gc_on_local(self): self.check_rewrite(""" @@ -83,9 +87,9 @@ jump() """, """ [p1, p2, p3, p4] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) setfield_gc(p3, p4, descr=tzdescr) jump() """) @@ -98,7 +102,7 @@ jump() """, """ [p1, p2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) jump() @@ -113,10 +117,10 @@ jump(p1) """, """ [p1, p2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, i3, descr=tydescr) jump(p1) """) @@ -158,12 +162,14 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) """) def test_rewrite_getfield_gc_const(self): + TP = lltype.GcArray(lltype.Signed) + NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) self.check_rewrite(""" [p1] p2 = getfield_gc(ConstPtr(t), descr=tzdescr) @@ -171,10 +177,10 @@ """, """ [p1] p3 = same_as(ConstPtr(t)) - cond_call_gc_wb(p3, 0, descr=P2Rdescr) + cond_call_stm_rb(p3, 0, descr=P2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) jump(p2) - """) + """, t=NULL) # XXX could do better: G2Rdescr def test_rewrite_getarrayitem_gc(self): @@ -184,7 +190,7 @@ jump(i3) """, """ [p1, i2] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) jump(i3) """) @@ -196,7 +202,7 @@ jump(i3) """, """ [p1, i2] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=adescr) jump(i3) """) @@ -209,7 +215,7 @@ jump(p2, i2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) i2 = getfield_gc(p1, descr=tydescr) jump(p2, i2) @@ -223,9 +229,9 @@ jump(p2, i2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - cond_call_gc_wb(p2, 0, descr=P2Rdescr) + cond_call_stm_rb(p2, 0, descr=P2Rdescr) i2 = getfield_gc(p2, descr=tydescr) jump(p2, i2) """) @@ -241,10 +247,10 @@ jump(p1) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) i1 = getfield_gc(p1, descr=tydescr) i2 = int_add(i1, 1) - cond_call_gc_wb(p1, 0, descr=R2Wdescr) + cond_call_stm_wb(p1, 0, descr=R2Wdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) @@ -257,7 +263,7 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 123, descr=tydescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) @@ -289,10 +295,10 @@ jump(p2) """, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) p2 
= getfield_gc(p1, descr=tzdescr) call(p2) - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 5, descr=tydescr) jump(p2) """) @@ -300,62 +306,62 @@ def test_getfield_raw(self): self.check_rewrite(""" [i1, i2] - i3 = getfield_raw(i1, descr=?) + i3 = getfield_raw(i1, descr=tydescr) keepalive(i3) # random ignored operation - i4 = getfield_raw(i2, descr=?) + i4 = getfield_raw(i2, descr=tydescr) jump(i3, i4) """, """ [i1, i2] $INEV - i3 = getfield_raw(i1, descr=?) + i3 = getfield_raw(i1, descr=tydescr) keepalive(i3) - i4 = getfield_raw(i2, descr=?) + i4 = getfield_raw(i2, descr=tydescr) jump(i3, i4) """) def test_getfield_raw_over_label(self): self.check_rewrite(""" [i1, i2] - i3 = getfield_raw(i1, descr=?) + i3 = getfield_raw(i1, descr=tydescr) label(i1, i2, i3) - i4 = getfield_raw(i2, descr=?) + i4 = getfield_raw(i2, descr=tydescr) jump(i3, i4) """, """ [i1, i2] $INEV - i3 = getfield_raw(i1, descr=?) + i3 = getfield_raw(i1, descr=tydescr) label(i1, i2, i3) $INEV - i4 = getfield_raw(i2, descr=?) + i4 = getfield_raw(i2, descr=tydescr) jump(i3, i4) """) def test_getarrayitem_raw(self): self.check_rewrite(""" [i1, i2] - i3 = getarrayitem_raw(i1, 5, descr=?) - i4 = getarrayitem_raw(i2, i3, descr=?) + i3 = getarrayitem_raw(i1, 5, descr=adescr) + i4 = getarrayitem_raw(i2, i3, descr=adescr) jump(i3, i4) """, """ [i1, i2] $INEV - i3 = getarrayitem_raw(i1, 5, descr=?) - i4 = getarrayitem_raw(i2, i3, descr=?) + i3 = getarrayitem_raw(i1, 5, descr=adescr) + i4 = getarrayitem_raw(i2, i3, descr=adescr) jump(i3, i4) """) def test_rewrite_unrelated_setarrayitem_gcs(self): self.check_rewrite(""" [p1, i1, p2, p3, i3, p4] - setarrayitem_gc(p1, i1, p2, descr=?) - setarrayitem_gc(p3, i3, p4, descr=?) + setarrayitem_gc(p1, i1, p2, descr=adescr) + setarrayitem_gc(p3, i3, p4, descr=adescr) jump() """, """ [p1, i1, p2, p3, i3, p4] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) - setarrayitem_gc(p1, i1, p2, descr=?) - cond_call_gc_wb(p3, 0, descr=P2Wdescr) - setarrayitem_gc(p3, i3, p4, descr=?) 
+ cond_call_stm_wb(p1, 0, descr=P2Wdescr) + setarrayitem_gc(p1, i1, p2, descr=adescr) + cond_call_stm_wb(p3, 0, descr=P2Wdescr) + setarrayitem_gc(p3, i3, p4, descr=adescr) jump() """) @@ -368,7 +374,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) @@ -384,7 +390,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setinteriorfield_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=adescr) @@ -399,7 +405,7 @@ jump() """, """ [p1, i2, i3] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() @@ -408,10 +414,12 @@ "that p1 is already a W") def test_fallback_to_inevitable(self): + T = rffi.CArrayPtr(rffi.TIME_T) + calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T) oplist = [ - "setfield_raw(i1, i2, descr=?)", - "setarrayitem_raw(i1, i2, i3, descr=?)", - "setinteriorfield_raw(i1, i2, i3, descr=?)", + "setfield_raw(i1, i2, descr=tydescr)", + "setarrayitem_raw(i1, i2, i3, descr=tydescr)", + "setinteriorfield_raw(i1, i2, i3, descr=adescr)", "call_release_gil(123, descr=calldescr2)", "escape(i1)", # a generic unknown operation ] @@ -424,14 +432,14 @@ jump(i2, p7) """ % op, """ [i1, i2, i3, p7] - cond_call_gc_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, 0, descr=P2Wdescr) setfield_gc(p7, 10, descr=tydescr) $INEV %s - cond_call_gc_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, 0, descr=P2Wdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) - """ % op) + """ % op, calldescr2=calldescr2) def test_copystrcontent(self): self.check_rewrite(""" @@ -440,8 +448,8 @@ jump() """, """ [p1, p2, i1, i2, i3] - cond_call_gc_wb(p2, 0, descr=P2Wdescr) - cond_call_gc_wb(p1, 0, descr=P2Rdescr) + cond_call_stm_wb(p2, 0, descr=P2Wdescr) + cond_call_stm_rb(p1, 0, descr=P2Rdescr) copystrcontent(p1, p2, i1, i2, i3) jump() """) @@ -460,7 +468,7 @@ jump(p1) """ % op, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) @@ -468,8 +476,10 @@ """ % op) def test_call_force(self): + T = rffi.CArrayPtr(rffi.TIME_T) + calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T) for op in ["call(123, descr=calldescr2)", - "call_assembler(123, descr=loopdescr)", + "call_assembler(123, descr=casmdescr)", "call_may_force(123, descr=calldescr2)", "call_loopinvariant(123, descr=calldescr2)", ]: @@ -481,13 +491,13 @@ jump(p1) """ % op, """ [p1] - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s - cond_call_gc_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 0, descr=P2Wdescr) setfield_gc(p1, 20, descr=tydescr) jump(p1) - """ % op) + """ % op, calldescr2=calldescr2) def test_ptr_eq_null(self): self.check_rewrite(""" diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4242,3 +4242,14 @@ assert rffi.cast(lltype.Signed, a[0]) == -7654 assert rffi.cast(lltype.Signed, a[1]) == 777 lltype.free(a, flavor='raw') + +class WBDescrForTests(AbstractDescr): + returns_modified_object = False + wb_slowpath = (0, 0, 0, 0) + def 
get_wb_slowpath(self, c1, c2): + return self.wb_slowpath[c1+2*c2] + def set_wb_slowpath(self, c1, c2, addr): + i = c1+2*c2 + self.wb_slowpath = (self.wb_slowpath[:i] + (addr,) + + self.wb_slowpath[i+1:]) + diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -345,6 +345,8 @@ if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, rop.COND_CALL_GC_WB, + rop.COND_CALL_STM_WB, + rop.COND_CALL_STM_RB, rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -500,6 +500,8 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', #'RUNTIMENEW/1', # ootype operation + 'COND_CALL_STM_WB/2d', # [objptr, newvalue] (write barrier) + 'COND_CALL_STM_RB/2d', # [objptr, newvalue] (read barrier) 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) 'DEBUG_MERGE_POINT/*', # debugging only diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -8,6 +8,27 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib.debug import ll_assert +from rpython.rlib.rarithmetic import LONG_BIT, r_uint + +WORD = LONG_BIT // 8 +NULL = llmemory.NULL + +# keep in sync with stmgc.h & et.h: +first_gcflag = 1 << (LONG_BIT//2) +GCFLAG_OLD = first_gcflag << 0 +GCFLAG_VISITED = first_gcflag << 1 +GCFLAG_PUBLIC = first_gcflag << 2 +GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 +GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 +GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h +GCFLAG_NURSERY_MOVED = first_gcflag << 6 +GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug +GCFLAG_STUB = first_gcflag << 8 # debug +GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 +GCFLAG_HAS_ID = first_gcflag << 10 + +PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 8) +PREBUILT_REVISION = r_uint(1) class StmGC(MovingGCBase): diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,5 @@ from rpython.rlib.objectmodel import we_are_translated, specialize -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -584,34 +584,34 @@ from rpython.translator.stm.funcgen import op_stm self.__class__.op_stm = op_stm return self.op_stm(op) - OP_STM_INITIALIZE = _OP_STM - OP_STM_FINALIZE = _OP_STM - OP_STM_BECOME_INEVITABLE = _OP_STM - OP_STM_BARRIER = _OP_STM - OP_STM_PTR_EQ = _OP_STM - OP_STM_PUSH_ROOT = _OP_STM - OP_STM_POP_ROOT_INTO = _OP_STM - OP_STM_ALLOCATE = _OP_STM - OP_STM_GET_TID = _OP_STM - OP_STM_HASH = _OP_STM - OP_STM_ID = _OP_STM - OP_STM_COMMIT_TRANSACTION = _OP_STM + OP_STM_INITIALIZE = _OP_STM + OP_STM_FINALIZE = _OP_STM + OP_STM_BECOME_INEVITABLE = _OP_STM + OP_STM_BARRIER = _OP_STM + OP_STM_PTR_EQ = _OP_STM + OP_STM_PUSH_ROOT = _OP_STM + OP_STM_POP_ROOT_INTO = _OP_STM + OP_STM_ALLOCATE = _OP_STM + OP_STM_GET_TID = _OP_STM + 
OP_STM_HASH = _OP_STM + OP_STM_ID = _OP_STM + OP_STM_COMMIT_TRANSACTION = _OP_STM OP_STM_BEGIN_INEVITABLE_TRANSACTION = _OP_STM - OP_STM_SHOULD_BREAK_TRANSACTION = _OP_STM - OP_STM_SET_TRANSACTION_LENGTH = _OP_STM - OP_STM_CHANGE_ATOMIC = _OP_STM - OP_STM_GET_ATOMIC = _OP_STM - OP_STM_THREADLOCAL_GET = _OP_STM - OP_STM_THREADLOCAL_SET = _OP_STM - OP_STM_PERFORM_TRANSACTION = _OP_STM - OP_STM_ENTER_CALLBACK_CALL = _OP_STM - OP_STM_LEAVE_CALLBACK_CALL = _OP_STM - OP_STM_ABORT_AND_RETRY = _OP_STM - OP_STM_ABORT_INFO_PUSH = _OP_STM - OP_STM_ABORT_INFO_POP = _OP_STM - OP_STM_INSPECT_ABORT_INFO = _OP_STM - OP_STM_MINOR_COLLECT = _OP_STM - OP_STM_MAJOR_COLLECT = _OP_STM + OP_STM_SHOULD_BREAK_TRANSACTION = _OP_STM + OP_STM_SET_TRANSACTION_LENGTH = _OP_STM + OP_STM_CHANGE_ATOMIC = _OP_STM + OP_STM_GET_ATOMIC = _OP_STM + OP_STM_THREADLOCAL_GET = _OP_STM + OP_STM_THREADLOCAL_SET = _OP_STM + OP_STM_PERFORM_TRANSACTION = _OP_STM + OP_STM_ENTER_CALLBACK_CALL = _OP_STM + OP_STM_LEAVE_CALLBACK_CALL = _OP_STM + OP_STM_ABORT_AND_RETRY = _OP_STM + OP_STM_ABORT_INFO_PUSH = _OP_STM + OP_STM_ABORT_INFO_POP = _OP_STM + OP_STM_INSPECT_ABORT_INFO = _OP_STM + OP_STM_MAJOR_COLLECT = _OP_STM + OP_STM_MINOR_COLLECT = _OP_STM def OP_PTR_NONZERO(self, op): diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -141,11 +141,11 @@ translator = self.translator if self.config.translation.stm: - from rpython.translator.stm import transform2 + from rpython.translator.stm import transform self.getentrypointptr() # build the wrapper first # ^^ this is needed to make sure we see the no-GC wrapper function # calling the GC entrypoint function. - stmtransformer = transform2.STMTransformer(self.translator) + stmtransformer = transform.STMTransformer(self.translator) stmtransformer.transform() gcpolicyclass = self.get_gcpolicyclass() diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -160,7 +160,7 @@ extfunc() res = self.interpret_inevitable(f1, []) - assert res == 'direct_call' + assert res == 'extfunc()' def test_rpy_direct_call(self): def f2(): diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py --- a/rpython/translator/stm/test/test_jitdriver.py +++ b/rpython/translator/stm/test/test_jitdriver.py @@ -1,5 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.stm.test.transform2_support import BaseTestTransform +from rpython.translator.stm.test.transform_support import BaseTestTransform from rpython.rlib.jit import JitDriver diff --git a/rpython/translator/stm/test/test_stmgcintf.c b/rpython/translator/stm/test/test_stmgcintf.c --- a/rpython/translator/stm/test/test_stmgcintf.c +++ b/rpython/translator/stm/test/test_stmgcintf.c @@ -11,6 +11,7 @@ struct pypy_header0 { long h_tid; Unsigned h_revision; + Unsigned h_original; }; struct pypy_pypy_rlib_rstm_Transaction0 { @@ -33,7 +34,8 @@ #define _RPyString_AsString(x) x #define RPyString_Size(x) strlen(x) - +#include "src_stm/stmgc.h" +#include "src_stm/stmimpl.h" #include "src_stm/et.h" #include "src_stm/et.c" diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -1,5 +1,5 @@ 
from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.stm.test.transform2_support import BaseTestTransform +from rpython.translator.stm.test.transform_support import BaseTestTransform class TestTransform(BaseTestTransform): diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -133,12 +133,16 @@ rgc.collect(0) return 0 # + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) class X: def __init__(self, count): self.count = count def g(): x = X(1000) - perform_transaction(lltype.malloc(FOOBAR)) + perform_transaction(lltype.malloc(S)) return x def entry_point(argv): x = X(len(argv)) @@ -146,7 +150,7 @@ print '<', x.count, y.count, '>' return 0 # - perform_transaction = rstm.make_perform_transaction(check, FOOBARP) + perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('a b c d') assert '< 5 1000 >' in data, "got: %r" % (data,) @@ -160,11 +164,15 @@ pass prebuilt2 = [X2(), X2()] # + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) def bug2(count): x = prebuilt2[count] x.foobar = 2 # 'x' becomes a local # - perform_transaction(lltype.malloc(FOOBAR)) + perform_transaction(lltype.malloc(S)) # 'x' becomes the global again # y = prebuilt2[count] # same prebuilt obj @@ -176,7 +184,7 @@ print bug2(1) return 0 # - perform_transaction = rstm.make_perform_transaction(check, FOOBARP) + perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(entry_point, backendopt=True) data = cbuilder.cmdexec('') assert '12\n12\n' in data, "got: %r" % (data,) @@ -184,6 +192,12 @@ def test_prebuilt_nongc(self): def check(foobar, retry_counter): return 0 # do nothing + from rpython.rtyper.lltypesystem.rclass import OBJECTPTR + from rpython.rtyper.lltypesystem import lltype + S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) + PS = lltype.Ptr(S) + perform_transaction = rstm.make_perform_transaction(check, PS) + from rpython.rtyper.lltypesystem import lltype R = lltype.GcStruct('R', ('x', lltype.Signed)) S1 = lltype.Struct('S1', ('r', lltype.Ptr(R))) @@ -192,7 +206,7 @@ # hints={'stm_thread_local': True}) #s2 = lltype.malloc(S2, immortal=True, flavor='raw') def do_stuff(): - perform_transaction(lltype.malloc(FOOBAR)) + perform_transaction(lltype.malloc(S)) print s1.r.x #print s2.r.x do_stuff._dont_inline_ = True @@ -204,7 +218,7 @@ do_stuff() return 0 # - perform_transaction = rstm.make_perform_transaction(check, FOOBARP) + perform_transaction = rstm.make_perform_transaction(check, PS) t, cbuilder = self.compile(main) data = cbuilder.cmdexec('') assert '42\n' in data, "got: %r" % (data,) @@ -262,9 +276,3 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'li102ee10:hi there 3e\n0\n' in data - - -FOOBAR = lltype.GcStruct('FOOBAR', - ('result_value', lltype.Void), - ('got_exception', rclass.OBJECTPTR)) -FOOBARP = lltype.Ptr(FOOBAR) diff --git a/rpython/translator/stm/test/transform2_support.py b/rpython/translator/stm/test/transform_support.py rename from 
rpython/translator/stm/test/transform2_support.py rename to rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform2_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import lltype, opimpl from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache -from rpython.translator.stm.transform2 import STMTransformer +from rpython.translator.stm.transform import STMTransformer from rpython.translator.stm.writebarrier import MORE_PRECISE_CATEGORIES from rpython.conftest import option diff --git a/rpython/translator/stm/transform2.py b/rpython/translator/stm/transform.py rename from rpython/translator/stm/transform2.py rename to rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform2.py +++ b/rpython/translator/stm/transform.py @@ -1,3 +1,10 @@ +from rpython.translator.backendopt.writeanalyze import WriteAnalyzer +from rpython.translator.stm.writebarrier import insert_stm_barrier +from rpython.translator.stm.inevitable import insert_turn_inevitable +from rpython.translator.stm.jitdriver import reorganize_around_jit_driver +from rpython.translator.stm.threadlocalref import transform_tlref +from rpython.translator.c.support import log + class STMTransformer(object): @@ -18,38 +25,27 @@ self.print_logs_after_gc() def transform_write_barrier(self): - from rpython.translator.backendopt.writeanalyze import WriteAnalyzer - from rpython.translator.stm.writebarrier import insert_stm_barrier - # self.write_analyzer = WriteAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) del self.write_analyzer def transform_turn_inevitable(self): - from rpython.translator.stm.inevitable import insert_turn_inevitable - # for graph in self.translator.graphs: insert_turn_inevitable(graph) def transform_jit_driver(self): - from rpython.translator.stm.jitdriver import reorganize_around_jit_driver - # for graph in self.translator.graphs: reorganize_around_jit_driver(self, graph) def transform_threadlocalref(self): - from rpython.translator.stm.threadlocalref import transform_tlref transform_tlref(self.translator) def start_log(self): - from rpython.translator.c.support import log log.info("Software Transactional Memory transformation") def print_logs(self): - from rpython.translator.c.support import log log.info("Software Transactional Memory transformation applied") def print_logs_after_gc(self): - from rpython.translator.c.support import log log.info("Software Transactional Memory transformation-after-gc done") From noreply at buildbot.pypy.org Sat Jul 6 21:38:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:38:12 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test and fix for stm_pointer_equal with null pointers Message-ID: <20130706193812.442A81C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r365:dd0aff1663a1 Date: 2013-07-06 21:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/dd0aff1663a1/ Log: Test and fix for stm_pointer_equal with null pointers diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -132,6 +132,10 @@ /* fast path for two equal pointers */ if (p1 == p2) return 1; + /* if p1 or p2 is NULL (but not both, because they are different + pointers), then return 0 */ + if (p1 == NULL || p2 == NULL) + return 0; /* types must be the same */ if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) return 0; 
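The fix above relies on the order of the checks: the identical-pointer fast path already handles the NULL == NULL case, so the added test only needs to reject the case where exactly one argument is NULL, before any header field is dereferenced. Below is a minimal, illustrative Python sketch of that ordering; it is not the stmgc code, None stands in for NULL, and get_tid is a hypothetical stand-in for the h_tid & STM_USER_TID_MASK comparison.

def pointer_equal(p1, p2, get_tid=id):
    # Fast path: the same pointer twice; this also covers NULL == NULL,
    # modelled here as None is None.
    if p1 is p2:
        return True
    # Exactly one side is NULL, so the objects cannot be equal; doing
    # this check first avoids touching a NULL header below.
    if p1 is None or p2 is None:
        return False
    # Stand-in for comparing the user type ids of the two headers.
    if get_tid(p1) != get_tid(p2):
        return False
    # The real implementation goes on to compare revision chains; the
    # sketch stops here and answers conservatively.
    return False

assert pointer_equal(None, None)
assert not pointer_equal(object(), None)
assert not pointer_equal(None, object())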
diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -102,3 +102,15 @@ c = lib.stm_inspect_abort_info() assert c assert ffi.string(c).endswith("ei424242ee") + +def test_pointer_equal(): + p = palloc(HDR) + assert lib.stm_pointer_equal(p, p) + assert not lib.stm_pointer_equal(p, ffi.NULL) + assert not lib.stm_pointer_equal(ffi.NULL, p) + assert lib.stm_pointer_equal(ffi.NULL, ffi.NULL) + q = lib.stm_write_barrier(p) + assert q != p + assert lib.stm_pointer_equal(p, q) + assert lib.stm_pointer_equal(q, q) + assert lib.stm_pointer_equal(q, p) From noreply at buildbot.pypy.org Sat Jul 6 21:38:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 21:38:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Import stmgc/dd0aff1663a1 Message-ID: <20130706193858.071611C2FBF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65241:7d53e33d17d1 Date: 2013-07-06 21:38 +0200 http://bitbucket.org/pypy/pypy/changeset/7d53e33d17d1/ Log: Import stmgc/dd0aff1663a1 diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -133,6 +133,10 @@ /* fast path for two equal pointers */ if (p1 == p2) return 1; + /* if p1 or p2 is NULL (but not both, because they are different + pointers), then return 0 */ + if (p1 == NULL || p2 == NULL) + return 0; /* types must be the same */ if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) return 0; diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -88246694721e +dd0aff1663a1 From noreply at buildbot.pypy.org Sat Jul 6 22:11:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 22:11:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Add Yet Another piece of text about why we downplay RPython. Message-ID: <20130706201148.EF3241C05DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65242:2ec1dce03744 Date: 2013-07-06 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/2ec1dce03744/ Log: Add Yet Another piece of text about why we downplay RPython. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -332,11 +332,29 @@ your code in RPython, it might not give you any speed improvements even if you manage to. + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. 
So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` --------------------------------------------------- Which backends are there for the RPython toolchain? From noreply at buildbot.pypy.org Sat Jul 6 22:17:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 22:17:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a paragraph Message-ID: <20130706201708.D788A1C05DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65243:733e10ed54c4 Date: 2013-07-06 22:16 +0200 http://bitbucket.org/pypy/pypy/changeset/733e10ed54c4/ Log: Add a paragraph diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -349,6 +349,12 @@ attempt to point newcomers at existing alternatives, which are more mainstream and where they will get help from many people.* + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. This is open source, which + means that anybody is free to promote and develop anything; but it + also means that you must let us choose* not *to go into that direction + ourselves.* + Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code From noreply at buildbot.pypy.org Sat Jul 6 22:20:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 6 Jul 2013 22:20:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Move the new paragraphs Message-ID: <20130706202016.A35371C05DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65244:84f919da866d Date: 2013-07-06 22:19 +0200 http://bitbucket.org/pypy/pypy/changeset/84f919da866d/ Log: Move the new paragraphs diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -332,6 +332,13 @@ your code in RPython, it might not give you any speed improvements even if you manage to. +Yes, it is possible with enough effort to compile small self-contained +pieces of RPython code doing a few performance-sensitive things. But +this case is not interesting for us. If you needed to rewrite the code +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + *The above paragraphs are not the whole truth. It* is *true that there are cases where writing a program as RPython gives you substantially better speed than running it on top of PyPy. However, the attitude of @@ -355,13 +362,6 @@ also means that you must let us choose* not *to go into that direction ourselves.* -Yes, it is possible with enough effort to compile small self-contained -pieces of RPython code doing a few performance-sensitive things. But -this case is not interesting for us. 
If you needed to rewrite the code -in RPython, you could as well have rewritten it in C or C++ or Java for -example. These are much more supported, much more documented languages -`:-)` - --------------------------------------------------- Which backends are there for the RPython toolchain? --------------------------------------------------- From noreply at buildbot.pypy.org Sun Jul 7 00:24:07 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 7 Jul 2013 00:24:07 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: more w_ corrections, try to be more clever in call2 for subtype compatability with numpy Message-ID: <20130706222407.C69091C05DF@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65245:2c2324b57f8c Date: 2013-07-07 01:21 +0300 http://bitbucket.org/pypy/pypy/changeset/2c2324b57f8c/ Log: more w_ corrections, try to be more clever in call2 for subtype compatability with numpy diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -28,6 +28,7 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod @@ -41,13 +42,13 @@ impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) if w_subtype: - ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, + w_ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) + W_NDimArray.__init__(w_ret, impl) + assert isinstance(w_ret, W_NDimArray) + space.call_function(space.getattr(w_ret, space.wrap('__array_finalize__')), w_subtype) - else: - ret = W_NDimArray(impl) - return ret + return w_ret + return W_NDimArray(impl) @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): @@ -62,11 +63,11 @@ impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) if w_subtype: - ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) - W_NDimArray.__init__(ret, impl) - space.call_function(space.getattr(ret, + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_function(space.getattr(w_ret, space.wrap('__array_finalize__')), w_subtype) - return ret + return w_ret return W_NDimArray(impl) @staticmethod diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -148,24 +148,24 @@ def repeat(space, w_arr, repeats, w_axis): arr = convert_to_array(space, w_arr) if space.is_none(w_axis): - w_arr = arr.descr_flatten(space) - orig_size = w_arr.get_shape()[0] - shape = [w_arr.get_shape()[0] * repeats] - w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=w_arr) + arr = arr.descr_flatten(space) + orig_size = arr.get_shape()[0] + shape = [arr.get_shape()[0] * repeats] + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(space, w_res).implementation.setslice(space, w_arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) - shape = w_arr.get_shape()[:] + shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) 
for i in shape] orig_size = shape[axis] shape[axis] *= repeats - w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=w_arr) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(space, w_res).implementation.setslice(space, w_arr) + Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) return w_res def count_nonzero(space, w_obj): diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -65,7 +65,7 @@ if length == 1: return base_iter.getitem() res = W_NDimArray.from_shape(space, [length], base.get_dtype(), - base.get_order(), subtype=base) + base.get_order(), w_subtype=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -147,11 +147,11 @@ chunks = self.implementation._prepare_slice_args(space, w_index) return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(space, shape, self.get_dtype(), - self.get_order(), subtype=self) - if not res.get_size(): - return res - return loop.getitem_array_int(space, self, res, iter_shape, indexes, + w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), w_subtype=self) + if not w_res.get_size(): + return w_res + return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, prefix) def setitem_array_int(self, space, w_index, w_value): @@ -910,6 +910,8 @@ @unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, order='C'): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.support import calc_strides if (offset != 0 or not space.is_none(w_strides) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, @@ -921,11 +923,17 @@ return W_NDimArray.new_scalar(space, dtype) if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) - raise OperationError(space.w_TypeError, space.wrap( - "__new__ is not meant to be called except with a ndarray")) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = ConcreteArray(shape, dtype.base, order, strides, + backstrides) + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_function(space.getattr(w_ret, + space.wrap('__array_finalize__')), w_subtype) + return w_ret @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subclass=None): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
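The recurring pattern in this branch is that every constructor reachable from a subclass instance allocates the result with the caller's class, skips __init__, and then calls __array_finalize__ so the subclass can copy over its extra state. A pure-Python sketch of that allocation pattern follows; it is illustrative only and simplified, since the real code works on interp-level W_NDimArray objects through the object space.

class BaseArray(object):
    def __array_finalize__(self, obj):
        pass

class InfoArray(BaseArray):
    def __array_finalize__(self, obj):
        # carry the subclass' extra attribute over from the template
        self.info = getattr(obj, 'info', None)

def from_shape(shape, subtype=None):
    # 'subtype' is an existing array instance whose class and extra
    # state the result should inherit, playing the role of the
    # w_subtype argument threaded through the constructors above.
    cls = type(subtype) if subtype is not None else BaseArray
    new = cls.__new__(cls)            # allocate without running __init__
    new.shape = shape
    new.__array_finalize__(subtype)   # let the subclass finish setting up
    return new

template = InfoArray.__new__(InfoArray)
template.info = 'information'
view = from_shape((3,), subtype=template)
assert isinstance(view, InfoArray)
assert view.info == 'information'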
@@ -937,9 +945,12 @@ space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - if w_subclass: + if w_subtype: + if not space.isinstance_w(w_subtype, space.w_type): + raise OperationError(space.w_ValueError, space.wrap( + "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - 'C', False, w_subclass) + 'C', False, w_subtype) else: return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) @@ -1122,12 +1133,12 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape - arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = arr.create_iter() + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) arr_iter.next() - return arr + return w_arr @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): @@ -1137,7 +1148,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) + return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1147,10 +1158,10 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - arr.fill(one) - return space.wrap(arr) + w_arr.fill(one) + return w_arr def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -182,7 +182,7 @@ if out: dtype = out.get_dtype() temp = W_NDimArray.from_shape(space, temp_shape, dtype, - subtype=obj) + w_subtype=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -21,8 +21,33 @@ def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): # handle array_priority + # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: + # 1. if __array_priorities__ are equal and one is an ndarray and the + # other is a subtype, flip the order + # 2. 
elif rhs.__array_priority__ is higher, flip the order + # Now return the subtype of the first one + + w_ndarray = space.gettypefor(W_NDimArray) + lhs_type = space.type(w_lhs) + rhs_type = space.type(w_rhs) + lhs_for_subtype = w_lhs + rhs_for_subtype = w_rhs + #it may be something like a FlatIter, which is not an ndarray + if not lhs_type.issubtype(w_ndarray): + lhs_type = space.type(w_lhs.base) + lhs_for_subtype = w_lhs.base + if not rhs_type.issubtype(w_ndarray): + rhs_type = space.gettypefor(w_rhs.base) + rhs_for_subtype = w_rhs.base + if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): + w_lhs, w_rhs = w_rhs, w_lhs + lhs_for_subtype = rhs_for_subtype + + # TODO handle __array_priorities__ and maybe flip the order + if out is None: - out = W_NDimArray.from_shape(space, shape, res_dtype, w_subtype=w_lhs) + out = W_NDimArray.from_shape(space, shape, res_dtype, + w_subtype=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -438,12 +463,12 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - res_str.implementation.get_storage_as_int(space)) + w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): - res_str.implementation.setitem(0, iter.getitem()) + w_res_str.implementation.setitem(0, iter.getitem()) for i in range(itemsize): builder.append(res_str_casted[i]) iter.next() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1477,7 +1477,6 @@ def __new__(subtype, shape, dtype): self = ndarray.__new__(subtype, shape, dtype) self.id = 'subtype' - print 'called new' return self a = C([2, 2], int) assert isinstance(a, C) @@ -1485,11 +1484,8 @@ assert a.shape == (2, 2) assert a.dtype is dtype(int) assert a.id == 'subtype' - print '1' a = a.reshape(1, 4) - print '2' b = a.reshape(4, 1) - print '3' assert isinstance(b, C) #make sure __new__ was not called assert not getattr(b, 'id', None) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -11,7 +11,7 @@ def __new__(cls, subtype): raise ValueError('should not call __new__') def __array_finalize__(self, obj): - + self.called_finalize = True return NoNew ''') cls.w_SubType = cls.space.appexec([], '''(): @@ -48,13 +48,11 @@ assert obj.info is None obj = InfoArray(shape=(3,), info='information') assert obj.info == 'information' - print 'a' v = obj[1:] assert isinstance(v, InfoArray) assert v.base is obj assert v.info == 'information' arr = np.arange(10) - print '1' cast_arr = arr.view(InfoArray) assert isinstance(cast_arr, InfoArray) assert cast_arr.base is arr @@ -70,7 +68,13 @@ assert False def test_sub_flatiter(self): - assert False + from numpypy import array + a = array(range(9)).reshape(3, 3).view(self.NoNew) + c = array(range(9)).reshape(3, 3) + assert isinstance(a.flat[:] + a.flat[:], self.NoNew) + assert isinstance(a.flat[:] + c.flat[:], self.NoNew) + assert isinstance(c.flat[:] + a.flat[:], self.NoNew) + assert not 
isinstance(c.flat[:] + c.flat[:], self.NoNew) def test_sub_getitem_filter(self): assert False From noreply at buildbot.pypy.org Sun Jul 7 02:16:13 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 02:16:13 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Fix rpython/rlib tests Message-ID: <20130707001613.3C9BC1C00B9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65246:05cb70729561 Date: 2013-07-07 02:15 +0200 http://bitbucket.org/pypy/pypy/changeset/05cb70729561/ Log: Fix rpython/rlib tests diff --git a/rpython/rlib/test/test_rawstorage.py b/rpython/rlib/test/test_rawstorage.py --- a/rpython/rlib/test/test_rawstorage.py +++ b/rpython/rlib/test/test_rawstorage.py @@ -1,8 +1,8 @@ -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\ raw_storage_setitem, raw_storage_getitem -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest def test_untranslated_storage(): r = alloc_raw_storage(15) @@ -11,7 +11,7 @@ free_raw_storage(r) assert res == 1<<30 -class TestRawStorage(BaseRtypingTest, LLRtypeMixin): +class TestRawStorage(BaseRtypingTest): def test_storage_int(self): def f(i): r = alloc_raw_storage(24) diff --git a/rpython/rlib/test/test_rstring.py b/rpython/rlib/test/test_rstring.py --- a/rpython/rlib/test/test_rstring.py +++ b/rpython/rlib/test/test_rstring.py @@ -2,7 +2,7 @@ from rpython.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit from rpython.rlib.rstring import replace, startswith, endswith -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest def test_split(): assert split("", 'x') == [''] @@ -163,7 +163,7 @@ assert isinstance(s.build(), unicode) -class TestTranslates(LLRtypeMixin, BaseRtypingTest): +class TestTranslates(BaseRtypingTest): def test_split_rsplit(self): def fn(): res = True diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -7,7 +7,7 @@ from rpython.rlib import streamio -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest class TSource(streamio.Stream): @@ -113,7 +113,7 @@ def flush(self): pass - + class TReaderWriter(TWriter): def read(self, n=-1): @@ -130,7 +130,7 @@ result = self.buf[start: stop] self.pos += n return result - + class BaseTestBufferingInputStreamTests(BaseRtypingTest): packets = ["a", "b", "\n", "def", "\nxy\npq\nuv", "wx"] @@ -247,7 +247,7 @@ return blocks == ["ab\nd", "ef\nx", "y\npq", "\nuvw", "x"] res = self.interpret(f, []) assert res - + def test_read_4_after_readline(self): file = self.makeStream() def f(): @@ -360,8 +360,7 @@ for seekto in range(0, end+1) for whence in [0, 1, 2]] random.shuffle(cases) - if isinstance(self, (LLRtypeMixin, OORtypeMixin)): - cases = cases[:7] # pick some cases at random - too slow! + cases = cases[:7] # pick some cases at random - too slow! def f(): all = file.readall() assert end == len(all) @@ -393,8 +392,7 @@ for seekto in range(0, end+1) for whence in [0, 1, 2]] random.shuffle(cases) - if isinstance(self, (LLRtypeMixin, OORtypeMixin)): - cases = cases[:7] # pick some cases at random - too slow! + cases = cases[:7] # pick some cases at random - too slow! 
def f(): for readto, seekto, whence in cases: base = TSource(self.packets, seek=False) @@ -428,12 +426,7 @@ def interpret(self, func, args, **kwds): return func(*args) -class TestBufferingInputStreamTestsLLinterp(BaseTestBufferingInputStreamTests, - LLRtypeMixin): - pass - -class TestBufferingInputStreamTestsOOinterp(BaseTestBufferingInputStreamTests, - OORtypeMixin): +class TestBufferingInputStreamTestsLLinterp(BaseTestBufferingInputStreamTests): pass class TestBufferedRead: @@ -452,7 +445,6 @@ return streamio.BufferingInputStream(base, bufsize) def test_dont_read_small(self): - import sys file = self.makeStream(bufsize=4) while file.read(1): pass for want, got, pos in self.source.chunks: @@ -527,12 +519,7 @@ def interpret(self, func, args, **kwds): return func(*args) -class TestBufferingOutputStreamLLinterp(BaseTestBufferingOutputStream, - LLRtypeMixin): - pass - -class TestBufferingOutputStreamOOinterp(BaseTestBufferingOutputStream, - OORtypeMixin): +class TestBufferingOutputStreamLLinterp(BaseTestBufferingOutputStream): pass @@ -572,12 +559,7 @@ def interpret(self, func, args, **kwds): return func(*args) -class TestLineBufferingOutputStreamLLinterp(BaseTestLineBufferingOutputStream, - LLRtypeMixin): - pass - -class TestLineBufferingOutputStreamOOinterp(BaseTestLineBufferingOutputStream, - OORtypeMixin): +class TestLineBufferingOutputStreamLLinterp(BaseTestLineBufferingOutputStream): pass @@ -601,10 +583,7 @@ def interpret(self, func, args, **kwds): return func(*args) -class TestCRLFFilterLLinterp(BaseTestCRLFFilter, LLRtypeMixin): - pass - -class TestCRLFFilterOOinterp(BaseTestCRLFFilter, OORtypeMixin): +class TestCRLFFilterLLinterp(BaseTestCRLFFilter): pass class BaseTestTextCRLFFilter(BaseRtypingTest): @@ -639,7 +618,7 @@ break crlf.seek(pos, 0) line2 = crlf.readline() - assert line2 == line + assert line2 == line lines.append(line) assert lines == expected self.interpret(f, []) @@ -678,12 +657,10 @@ assert line == '' self.interpret(f, []) -class TestTextCRLFFilterLLInterp(BaseTestTextCRLFFilter, LLRtypeMixin): +class TestTextCRLFFilterLLInterp(BaseTestTextCRLFFilter): pass - -class TestTextCRLFFilterOOInterp(BaseTestTextCRLFFilter, OORtypeMixin): - pass - + + class TestMMapFile(BaseTestBufferingInputStreamTests): tfn = None fd = None @@ -783,11 +760,7 @@ return func(*args) class TestBufferingInputOutputStreamTestsLLinterp( - BaseTestBufferingInputOutputStreamTests, LLRtypeMixin): - pass - -class TestBufferingInputOutputStreamTestsOOinterp( - BaseTestBufferingInputOutputStreamTests, OORtypeMixin): + BaseTestBufferingInputOutputStreamTests): pass @@ -895,15 +868,12 @@ assert filter.getnewlines() == e self.interpret(f, []) - + class TestTextInputFilter(BaseTestTextInputFilter): def interpret(self, func, args): return func(*args) -class TestTextInputFilterLLinterp(BaseTestTextInputFilter, LLRtypeMixin): - pass - -class TestTextInputFilterOOinterp(BaseTestTextInputFilter, OORtypeMixin): +class TestTextInputFilterLLinterp(BaseTestTextInputFilter): pass @@ -983,10 +953,7 @@ def interpret(self, func, args): return func(*args) -class TestTextOutputFilterLLinterp(BaseTestTextOutputFilter, LLRtypeMixin): - pass - -class TestTextOutputFilterOOinterp(BaseTestTextOutputFilter, OORtypeMixin): +class TestTextOutputFilterLLinterp(BaseTestTextOutputFilter): pass @@ -1007,7 +974,7 @@ bufs.append(c) assert u"".join(bufs) == chars -class TestEncodingOutputFilterTests: +class TestEncodingOutputFilterTests: def test_write(self): chars = u"abc\xff\u1234\u4321\x80xyz" @@ -1161,7 +1128,7 @@ filemode = 
os.O_RDONLY if "w" in mode: filemode |= os.O_WRONLY - + fd = os.open(fn, filemode) base = streamio.DiskFile(fd) return streamio.BufferingInputStream(base) From noreply at buildbot.pypy.org Sun Jul 7 03:50:28 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 03:50:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't disable the whole test collection on non-ARM CPUs Message-ID: <20130707015028.723E61C1471@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r65247:3fc9da9637c3 Date: 2013-07-07 03:49 +0200 http://bitbucket.org/pypy/pypy/changeset/3fc9da9637c3/ Log: Don't disable the whole test collection on non-ARM CPUs diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,7 +16,5 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_collect_directory(path, parent): - if not cpu.startswith('arm'): - py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) -pytest_collect_file = pytest_collect_directory +def pytest_ignore_collect(path, config): + return not cpu.startswith('arm') From noreply at buildbot.pypy.org Sun Jul 7 09:38:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 7 Jul 2013 09:38:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Give multithreading as an example Message-ID: <20130707073840.156D31C30E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65248:f5c4d57a8dc3 Date: 2013-07-07 09:37 +0200 http://bitbucket.org/pypy/pypy/changeset/f5c4d57a8dc3/ Log: Give multithreading as an example diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -357,10 +357,13 @@ mainstream and where they will get help from many people.* *If anybody seriously wants to promote RPython anyway, he is welcome - to: we won't actively resist such a plan. This is open source, which - means that anybody is free to promote and develop anything; but it - also means that you must let us choose* not *to go into that direction - ourselves.* + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? From noreply at buildbot.pypy.org Sun Jul 7 14:22:21 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 14:22:21 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: hg merge default Message-ID: <20130707122221.1BCE51C0512@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65249:85e691b55007 Date: 2013-07-07 03:56 +0200 http://bitbucket.org/pypy/pypy/changeset/85e691b55007/ Log: hg merge default diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. 
-Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -322,8 +335,32 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. 
However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. This is open source, which + means that anybody is free to promote and develop anything; but it + also means that you must let us choose* not *to go into that direction + ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,19 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: for op in chunk.operations: @@ -162,15 +163,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -223,5 +223,5 @@ log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - ops = loop.ops_by_id('getitem') + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) assert log.opnames(ops) == [] diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, ConstPtr(ptr70), descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,7 +16,5 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_collect_directory(path, parent): - if not cpu.startswith('arm'): - py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) -pytest_collect_file = pytest_collect_directory +def pytest_ignore_collect(path, config): + return not cpu.startswith('arm') From noreply at buildbot.pypy.org Sun Jul 7 14:22:22 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 14:22:22 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove rpython.jit.backend.cli Message-ID: <20130707122222.96BFE1C13AA@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65250:f412e1a0c029 Date: 2013-07-07 05:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f412e1a0c029/ Log: Remove rpython.jit.backend.cli diff --git a/rpython/jit/backend/cli/README.txt b/rpython/jit/backend/cli/README.txt deleted file mode 100644 --- a/rpython/jit/backend/cli/README.txt +++ /dev/null @@ -1,23 +0,0 @@ -What you need to run CLI jit tests -================================== - -Translated tests ------------------ - -Recent versions of mono contains a bug that prevents jit tests to run correctly: -http://bugzilla.novell.com/show_bug.cgi?id=474718 - -To run them, you either need: - - - an old version of mono (1.9 is known to work; probably versions up to 2.2 - works too but I'm not sure) - - - to run mono with -O=-branch; something like alias mono="mono -O=-branch" - should work, but I never tried - - -Direct tests ------------- - -You need Pythonnet: instructions on how to install it are here: -http://codespeak.net/pypy/dist/pypy/doc/cli-backend.html diff --git a/rpython/jit/backend/cli/__init__.py b/rpython/jit/backend/cli/__init__.py deleted file mode 100644 diff --git a/rpython/jit/backend/cli/method.py b/rpython/jit/backend/cli/method.py deleted file mode 100644 --- a/rpython/jit/backend/cli/method.py +++ /dev/null @@ -1,863 +0,0 @@ -import py -import os -from rpython.rlib.debug import debug_start, debug_stop -from rpython.tool.pairtype import extendabletype -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli import dotnet -from rpython.translator.cli.dotnet import CLR -from rpython.translator.cli import opcodes -from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import (AbstractValue, Const, ConstInt, ConstFloat, - ConstObj, BoxInt, LoopToken) -from rpython.jit.metainterp.resoperation import rop, opname -from rpython.jit.metainterp.typesystem import oohelper -from rpython.jit.backend.cli import runner -from rpython.jit.backend.cli.methodfactory import get_method_wrapper - -System = CLR.System -OpCodes = System.Reflection.Emit.OpCodes -LoopDelegate = CLR.pypy.runtime.LoopDelegate -DelegateHolder = CLR.pypy.runtime.DelegateHolder -InputArgs = 
CLR.pypy.runtime.InputArgs -ListOfVoid = CLR.pypy.runtime.ListOfVoid -Utils = CLR.pypy.runtime.Utils - -cVoid = ootype.nullruntimeclass - -class __extend__(AbstractValue): - __metaclass__ = extendabletype - - def getCliType(self, meth): - if self in meth.box2type: - return meth.box2type[self] - - if self.type == history.INT: - return dotnet.typeof(System.Int32) - elif self.type == history.FLOAT: - return dotnet.typeof(System.Double) - elif self.type == history.REF: - return dotnet.typeof(System.Object) - else: - assert False, 'Unknown type: %s' % self.type - - def load(self, meth): - v = meth.var_for_box(self) - meth.il.Emit(OpCodes.Ldloc, v) - - def store(self, meth): - v = meth.var_for_box(self) - meth.il.Emit(OpCodes.Stloc, v) - - -class __extend__(Const): - __metaclass__ = extendabletype - - def load(self, meth): - index = meth.get_index_for_constant(self) - meth.av_consts.load(meth) - meth.il.Emit(OpCodes.Ldc_I4, index) - meth.il.Emit(OpCodes.Ldelem_Ref) - - def store(self, meth): - assert False, 'cannot store() to Constant' - - def get_cliobj(self): - return dotnet.cast_to_native_object(self.getref_base()) - -class __extend__(ConstInt): - __metaclass__ = extendabletype - - def load(self, meth): - meth.il.Emit(OpCodes.Ldc_I4, self.value) - - -class __extend__(ConstFloat): - __metaclass__ = extendabletype - - def load(self, meth): - # we cannot invoke il.Emit(Ldc_R8, self.value) directly because - # pythonnet would select the wrong overload. The C# version works - # arond it - Utils.Emit_Ldc_R8(meth.il, self.value); - - -class ConstFunction(Const): - - def __init__(self, name): - self.name = name - self.holder = DelegateHolder() - - def get_cliobj(self): - return dotnet.cliupcast(self.holder, System.Object) - - def load(self, meth): - holdertype = self.holder.GetType() - funcfield = holdertype.GetField('func') - Const.load(self, meth) - meth.il.Emit(OpCodes.Castclass, holdertype) - meth.il.Emit(OpCodes.Ldfld, funcfield) - meth.il.Emit(OpCodes.Castclass, dotnet.typeof(LoopDelegate)) - - def _getrepr_(self): - return '' % self.name - - def __hash__(self): - return hash(self.holder) - - -class MethodArgument(AbstractValue): - def __init__(self, index, cliType): - self.index = index - self.cliType = cliType - - def getCliType(self, meth): - return self.cliType - - def load(self, meth): - if self.index == 0: - meth.il.Emit(OpCodes.Ldarg_0) - elif self.index == 1: - meth.il.Emit(OpCodes.Ldarg_1) - elif self.index == 2: - meth.il.Emit(OpCodes.Ldarg_2) - elif self.index == 3: - meth.il.Emit(OpCodes.Ldarg_3) - else: - meth.il.Emit(OpCodes.Ldarg, self.index) - - def store(self, meth): - meth.il.Emit(OpCodes.Starg, self.index) - - def __repr__(self): - return "MethodArgument(%d)" % self.index - - -class Method(object): - - operations = [] # overwritten at the end of the module - debug = False - tailcall = True - nocast = True - - def __init__(self, cpu, cliloop): - self.setoptions() - self.cpu = cpu - self.name = cliloop.get_fresh_cli_name() - self.cliloop = cliloop - self.boxes = {} # box --> local var - self.branches = [] - self.branchlabels = [] - self.consts = {} # object --> index - self.meth_wrapper = self._get_meth_wrapper() - self.il = self.meth_wrapper.get_il_generator() - self.av_consts = MethodArgument(0, System.Type.GetType("System.Object[]")) - t_InputArgs = dotnet.typeof(InputArgs) - self.av_inputargs = MethodArgument(1,t_InputArgs ) - self.av_ovf_flag = BoxInt() - self.exc_value_field = t_InputArgs.GetField('exc_value') - if cpu.rtyper: - self.av_OverflowError = 
ConstObj(ootype.cast_to_object(cpu.ll_ovf_exc)) - self.av_ZeroDivisionError = ConstObj(ootype.cast_to_object(cpu.ll_zero_exc)) - else: - self.av_OverflowError = None - self.av_ZeroDivisionError = None - self.box2type = {} - - def compile(self): - # ---- - debug_start('jit-backend-emit_ops') - if self.nocast: - self.compute_types() - self.emit_load_inputargs() - self.emit_preamble() - self.emit_operations(self.cliloop.operations) - self.emit_branches() - self.emit_end() - debug_stop('jit-backend-emit_ops') - # ---- - debug_start('jit-backend-finish_code') - res = self.finish_code() - debug_stop('jit-backend-finish_code') - return res - - def _parseopt(self, text): - text = text.lower() - if text[0] == '-': - return text[1:], False - elif text[0] == '+': - return text[1:], True - else: - return text, True - - def setoptions(self): - opts = os.environ.get('PYPYJITOPT') - if not opts: - return - parts = opts.split(' ') - for part in parts: - name, value = self._parseopt(part) - if name == 'debug': - self.debug = value - elif name == 'tailcall': - self.tailcall = value - elif name == 'nocast': - self.nocast = value - else: - os.write(2, 'Warning: invalid option name: %s\n' % name) - - def _collect_types(self, operations, box2classes): - for op in operations: - if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC): - box = op.args[0] - descr = op.getdescr() - assert isinstance(descr, runner.FieldDescr) - box2classes.setdefault(box, []).append(descr.selfclass) - if op in self.cliloop.guard2ops: - _, suboperations = self.cliloop.guard2ops[op] - self._collect_types(suboperations, box2classes) - - def compute_types(self): - box2classes = {} # box --> [ootype.Class] - self._collect_types(self.cliloop.operations, box2classes) - for box, classes in box2classes.iteritems(): - cls = classes[0] - for cls2 in classes[1:]: - if ootype.subclassof(cls, cls2): - cls = cls2 - else: - assert ootype.subclassof(cls2, cls) - self.box2type[box] = dotnet.class2type(cls) - - def finish_code(self): - delegatetype = dotnet.typeof(LoopDelegate) - # initialize the array of genconsts - consts = dotnet.new_array(System.Object, len(self.consts)) - for av_const, i in self.consts.iteritems(): - consts[i] = av_const.get_cliobj() - # build the delegate - func = self.meth_wrapper.create_delegate(delegatetype, consts) - return dotnet.clidowncast(func, LoopDelegate) - - def _get_meth_wrapper(self): - restype = dotnet.class2type(cVoid) - args = self._get_args_array([dotnet.typeof(InputArgs)]) - return get_method_wrapper(self.name, restype, args) - - def _get_args_array(self, arglist): - array = dotnet.new_array(System.Type, len(arglist)+1) - array[0] = System.Type.GetType("System.Object[]") - for i in range(len(arglist)): - array[i+1] = arglist[i] - return array - - def var_for_box(self, box): - try: - return self.boxes[box] - except KeyError: - v = self.il.DeclareLocal(box.getCliType(self)) - self.boxes[box] = v - return v - - def match_var_fox_boxes(self, failargs, inputargs): - failargs = [arg for arg in failargs if arg is not None] - assert len(failargs) == len(inputargs) - for i in range(len(failargs)): - v = self.boxes[failargs[i]] - self.boxes[inputargs[i]] = v - - def get_index_for_failing_op(self, op): - try: - return self.cpu.failing_ops.index(op) - except ValueError: - self.cpu.failing_ops.append(op) - return len(self.cpu.failing_ops)-1 - - def get_index_for_constant(self, obj): - try: - return self.consts[obj] - except KeyError: - index = len(self.consts) - self.consts[obj] = index - return index - - def newbranch(self, 
op): - # sanity check, maybe we can remove it later - for myop in self.branches: - assert myop is not op - il_label = self.il.DefineLabel() - self.branches.append(op) - self.branchlabels.append(il_label) - return il_label - - def get_inputarg_field(self, type): - t = dotnet.typeof(InputArgs) - if type == history.INT: - fieldname = 'ints' - elif type == history.FLOAT: - fieldname = 'floats' - elif type == history.REF: - fieldname = 'objs' - else: - assert False, 'Unknown type %s' % type - return t.GetField(fieldname) - - def load_inputarg(self, i, type, clitype): - field = self.get_inputarg_field(type) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldfld, field) - self.il.Emit(OpCodes.Ldc_I4, i) - self.il.Emit(OpCodes.Ldelem, clitype) - - def store_inputarg(self, i, type, clitype, valuebox): - field = self.get_inputarg_field(type) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldfld, field) - self.il.Emit(OpCodes.Ldc_I4, i) - valuebox.load(self) - self.il.Emit(OpCodes.Stelem, clitype) - - def emit_load_inputargs(self): - self.emit_debug("executing: " + self.name) - i = 0 - for box in self.cliloop.inputargs: - self.load_inputarg(i, box.type, box.getCliType(self)) - box.store(self) - i+=1 - - def emit_preamble(self): - self.il_loop_start = self.il.DefineLabel() - self.il.MarkLabel(self.il_loop_start) - - def emit_operations(self, oplist): - self.i = 0 - self.oplist = oplist - N = len(oplist) - while self.i < N: - op = oplist[self.i] - self.emit_debug(op.repr()) - func = self.operations[op.getopnum()] - assert func is not None - func(self, op) - self.i += 1 - - def emit_branches(self): - while self.branches: - branches = self.branches - branchlabels = self.branchlabels - self.branches = [] - self.branchlabels = [] - assert len(branches) == len(branchlabels) - for i in range(len(branches)): - op = branches[i] - il_label = branchlabels[i] - self.il.MarkLabel(il_label) - self.emit_guard_subops(op) - - def emit_guard_subops(self, op): - assert op.is_guard() - if op in self.cliloop.guard2ops: - inputargs, suboperations = self.cliloop.guard2ops[op] - self.match_var_fox_boxes(op.getfailargs(), inputargs) - self.emit_operations(suboperations) - else: - self.emit_return_failed_op(op, op.getfailargs()) - - def emit_end(self): - assert self.branches == [] - self.il.Emit(OpCodes.Ret) - - # ----------------------------- - - def push_all_args(self, op): - for box in op.args: - box.load(self) - - def push_arg(self, op, n): - op.args[n].load(self) - - def store_result(self, op): - op.result.store(self) - - def emit_debug(self, msg): - if self.debug: - self.il.EmitWriteLine(msg) - - def emit_clear_exception(self): - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldnull) - self.il.Emit(OpCodes.Stfld, self.exc_value_field) - # clear the overflow flag - self.il.Emit(OpCodes.Ldc_I4_0) - self.av_ovf_flag.store(self) - - def emit_raising_op(self, op, emit_op, exctypes): - self.emit_clear_exception() - lbl = self.il.BeginExceptionBlock() - emit_op(self, op) - self.il.Emit(OpCodes.Leave, lbl) - for exctype in exctypes: - v = self.il.DeclareLocal(exctype) - self.il.BeginCatchBlock(exctype) - if exctype == dotnet.typeof(System.OverflowException) and self.av_OverflowError: - self.il.Emit(OpCodes.Ldc_I4_1) - self.av_ovf_flag.store(self) - else: - self.il.Emit(OpCodes.Stloc, v) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldloc, v) - self.il.Emit(OpCodes.Stfld, self.exc_value_field) - self.il.EndExceptionBlock() - - def emit_ovf_op(self, op, emit_op): - next_op = self.oplist[self.i+1] - if 
next_op.getopnum() == rop.GUARD_NO_OVERFLOW: - self.i += 1 - self.emit_ovf_op_and_guard(op, next_op, emit_op) - return - # clear the overflow flag - self.il.Emit(OpCodes.Ldc_I4_0) - self.av_ovf_flag.store(self) - lbl = self.il.BeginExceptionBlock() - emit_op(self, op) - self.il.Emit(OpCodes.Leave, lbl) - self.il.BeginCatchBlock(dotnet.typeof(System.OverflowException)) - self.il.Emit(OpCodes.Ldc_I4_1) - self.av_ovf_flag.store(self) - self.il.EndExceptionBlock() - - def emit_ovf_op_and_guard(self, op, opguard, emit_op): - # emit the checked operation - lbl = self.il.BeginExceptionBlock() - emit_op(self, op) - self.il.Emit(OpCodes.Leave, lbl) - self.il.BeginCatchBlock(dotnet.typeof(System.OverflowException)) - # emit the guard - assert len(opguard.args) == 0 - il_label = self.newbranch(opguard) - self.il.Emit(OpCodes.Leave, il_label) - self.il.EndExceptionBlock() - - def mark(self, msg): - self.il.Emit(OpCodes.Ldstr, msg) - self.il.Emit(OpCodes.Pop) - - # -------------------------------- - - def emit_return_failed_op(self, op, args): - # store the index of the failed op - index_op = self.get_index_for_failing_op(op) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldc_I4, index_op) - field = dotnet.typeof(InputArgs).GetField('failed_op') - self.il.Emit(OpCodes.Stfld, field) - self.emit_store_opargs(args) - self.il.Emit(OpCodes.Ret) - - def emit_op_finish(self, op): - self.emit_return_failed_op(op, op.args) - - def emit_store_opargs(self, args): - # store the latest values - i = 0 - for box in args: - if box is not None: - self.store_inputarg(i, box.type, box.getCliType(self), box) - i+=1 - - def emit_guard_bool(self, op, opcode): - assert len(op.args) == 1 - il_label = self.newbranch(op) - op.args[0].load(self) - self.il.Emit(opcode, il_label) - - def emit_op_guard_true(self, op): - self.emit_guard_bool(op, OpCodes.Brfalse) - - def emit_op_guard_false(self, op): - self.emit_guard_bool(op, OpCodes.Brtrue) - - def emit_op_guard_nonnull(self, op): - self.emit_guard_bool(op, OpCodes.Brfalse) - - def emit_op_guard_isnull(self, op): - self.emit_guard_bool(op, OpCodes.Brtrue) - - def emit_op_guard_value(self, op): - assert len(op.args) == 2 - il_label = self.newbranch(op) - self.push_all_args(op) - self.il.Emit(OpCodes.Bne_Un, il_label) - - def emit_op_guard_class(self, op): - assert len(op.args) == 2 - il_label = self.newbranch(op) - self.push_arg(op, 0) - meth = dotnet.typeof(System.Object).GetMethod("GetType") - self.il.Emit(OpCodes.Callvirt, meth) - self.push_arg(op, 1) - self.il.Emit(OpCodes.Bne_Un, il_label) - - def emit_op_guard_nonnull_class(self, op): - assert len(op.args) == 2 - il_label = self.newbranch(op) - # nonnull check - self.push_arg(op, 0) - self.il.Emit(OpCodes.Brfalse, il_label) - # class check - self.push_arg(op, 0) - meth = dotnet.typeof(System.Object).GetMethod("GetType") - self.il.Emit(OpCodes.Callvirt, meth) - self.push_arg(op, 1) - self.il.Emit(OpCodes.Bne_Un, il_label) - - def emit_op_guard_no_exception(self, op): - il_label = self.newbranch(op) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldfld, self.exc_value_field) - self.il.Emit(OpCodes.Brtrue, il_label) - - def emit_op_guard_exception(self, op): - il_label = self.newbranch(op) - classbox = op.args[0] - assert isinstance(classbox, ConstObj) - oocls = classbox.getref(ootype.Class) - clitype = dotnet.class2type(oocls) - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldfld, self.exc_value_field) - self.il.Emit(OpCodes.Isinst, clitype) - self.il.Emit(OpCodes.Brfalse, il_label) - # the guard succeeded, 
store the result - self.av_inputargs.load(self) - self.il.Emit(OpCodes.Ldfld, self.exc_value_field) - self.store_result(op) - - def emit_guard_overflow_impl(self, op, opcode): - assert len(op.args) == 0 - il_label = self.newbranch(op) - self.av_ovf_flag.load(self) - self.il.Emit(opcode, il_label) - - def emit_op_guard_no_overflow(self, op): - self.emit_guard_overflow_impl(op, OpCodes.Brtrue) - - def emit_op_guard_overflow(self, op): - self.emit_guard_overflow_impl(op, OpCodes.Brfalse) - - def emit_op_jump(self, op): - target_token = op.getdescr() - assert isinstance(target_token, LoopToken) - if target_token.cliloop is self.cliloop: - # jump to the beginning of the loop - i = 0 - for i in range(len(op.args)): - op.args[i].load(self) - self.cliloop.inputargs[i].store(self) - self.il.Emit(OpCodes.Br, self.il_loop_start) - else: - # it's a real bridge - cliloop = target_token.cliloop - assert len(op.args) == len(cliloop.inputargs) - self.emit_debug('jumping to ' + cliloop.name) - self.emit_store_opargs(op.args) - cliloop.funcbox.load(self) - self.av_inputargs.load(self) - methinfo = dotnet.typeof(LoopDelegate).GetMethod('Invoke') - if self.tailcall: - self.il.Emit(OpCodes.Tailcall) - self.il.Emit(OpCodes.Callvirt, methinfo) - self.il.Emit(OpCodes.Ret) - - def emit_op_new_with_vtable(self, op): - clsbox = op.args[0] - assert isinstance(clsbox, ConstObj) - cls = clsbox.getref_base() - descr = self.cpu.class_sizes[cls] - assert isinstance(descr, runner.TypeDescr) - clitype = descr.get_clitype() - ctor_info = descr.get_constructor_info() - self.il.Emit(OpCodes.Newobj, ctor_info) - self.store_result(op) - - def emit_op_runtimenew(self, op): - clitype_utils = dotnet.typeof(Utils) - methinfo = clitype_utils.GetMethod('RuntimeNew') - op.args[0].load(self) - self.il.Emit(OpCodes.Call, methinfo) - self.store_result(op) - - def emit_op_instanceof(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.TypeDescr) - clitype = descr.get_clitype() - op.args[0].load(self) - self.il.Emit(OpCodes.Isinst, clitype) - self.il.Emit(OpCodes.Ldnull) - self.il.Emit(OpCodes.Cgt_Un) - self.store_result(op) - - def emit_op_subclassof(self, op): - clitype_utils = dotnet.typeof(Utils) - methinfo = clitype_utils.GetMethod('SubclassOf') - op.args[0].load(self) - op.args[1].load(self) - self.il.Emit(OpCodes.Call, methinfo) - self.store_result(op) - - def emit_op_call_impl(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.StaticMethDescr) - delegate_type = descr.get_delegate_clitype() - meth_invoke = descr.get_meth_info() - self._emit_call(op, OpCodes.Callvirt, delegate_type, - meth_invoke, descr.has_result) - - def emit_op_call(self, op): - emit_op = Method.emit_op_call_impl.im_func - exctypes = [dotnet.typeof(System.Exception)] - self.emit_raising_op(op, emit_op, exctypes) - - emit_op_call_pure = emit_op_call - - def emit_op_oosend(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.MethDescr) - clitype = descr.get_self_clitype() - methinfo = descr.get_meth_info() - opcode = descr.get_call_opcode() - self._emit_call(op, opcode, clitype, methinfo, descr.has_result) - - emit_op_oosend_pure = emit_op_oosend - - def _emit_call(self, op, opcode, clitype, methinfo, has_result): - av_sm, args_av = op.args[0], op.args[1:] - av_sm.load(self) - self.il.Emit(OpCodes.Castclass, clitype) - for av_arg in args_av: - av_arg.load(self) - self.il.Emit(opcode, methinfo) - if has_result: - self.store_result(op) - - def emit_op_getfield_gc(self, op): - descr = op.getdescr() - assert 
isinstance(descr, runner.FieldDescr) - clitype = descr.get_self_clitype() - fieldinfo = descr.get_field_info() - obj = op.args[0] - obj.load(self) - if obj.getCliType(self) is not clitype: - self.il.Emit(OpCodes.Castclass, clitype) - self.il.Emit(OpCodes.Ldfld, fieldinfo) - self.store_result(op) - - emit_op_getfield_gc_pure = emit_op_getfield_gc - - def emit_op_setfield_gc(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.FieldDescr) - clitype = descr.get_self_clitype() - fieldinfo = descr.get_field_info() - obj = op.args[0] - obj.load(self) - if obj.getCliType(self) is not clitype: - self.il.Emit(OpCodes.Castclass, clitype) - op.args[1].load(self) - self.il.Emit(OpCodes.Stfld, fieldinfo) - - def emit_op_getarrayitem_gc(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.TypeDescr) - clitype = descr.get_array_clitype() - itemtype = descr.get_clitype() - op.args[0].load(self) - self.il.Emit(OpCodes.Castclass, clitype) - op.args[1].load(self) - self.il.Emit(OpCodes.Ldelem, itemtype) - self.store_result(op) - - emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc - - def emit_op_setarrayitem_gc(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.TypeDescr) - clitype = descr.get_array_clitype() - itemtype = descr.get_clitype() - op.args[0].load(self) - self.il.Emit(OpCodes.Castclass, clitype) - op.args[1].load(self) - op.args[2].load(self) - self.il.Emit(OpCodes.Stelem, itemtype) - - def emit_op_arraylen_gc(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.TypeDescr) - clitype = descr.get_array_clitype() - op.args[0].load(self) - self.il.Emit(OpCodes.Castclass, clitype) - self.il.Emit(OpCodes.Ldlen) - self.store_result(op) - - def emit_op_new_array(self, op): - descr = op.getdescr() - assert isinstance(descr, runner.TypeDescr) - item_clitype = descr.get_clitype() - if item_clitype is None: - return self.emit_new_arrayofvoids(op) - op.args[0].load(self) - self.il.Emit(OpCodes.Newarr, item_clitype) - self.store_result(op) - - def emit_new_arrayofvoids(self, op): - clitype = dotnet.typeof(ListOfVoid) - ctor = clitype.GetConstructor(dotnet.new_array(System.Type, 0)) - _ll_resize = clitype.GetMethod('_ll_resize') - self.il.Emit(OpCodes.Newobj, ctor) - self.il.Emit(OpCodes.Dup) - op.args[0].load(self) - self.il.Emit(OpCodes.Callvirt, _ll_resize) - self.store_result(op) - - def emit_op_debug_merge_point(self, op): - pass - - def lltype_only(self, op): - print 'Operation %s is lltype specific, should not get here!' 
% op.getopname() - raise NotImplementedError - - emit_op_new = lltype_only - emit_op_setfield_raw = lltype_only - emit_op_getfield_raw = lltype_only - emit_op_getfield_raw_pure = lltype_only - emit_op_strsetitem = lltype_only - emit_op_unicodesetitem = lltype_only - emit_op_cast_int_to_ptr = lltype_only - emit_op_cast_ptr_to_int = lltype_only - emit_op_newstr = lltype_only - emit_op_strlen = lltype_only - emit_op_strgetitem = lltype_only - emit_op_newunicode = lltype_only - emit_op_unicodelen = lltype_only - emit_op_unicodegetitem = lltype_only - emit_op_cond_call_gc_wb = lltype_only - emit_op_setarrayitem_raw = lltype_only - - -# -------------------------------------------------------------------- - -# the follwing functions automatically build the various emit_op_* -# operations based on the definitions in translator/cli/opcodes.py - -def make_operation_list(): - operations = [None] * (rop._LAST+1) - for key, value in rop.__dict__.items(): - key = key.lower() - if key.startswith('_'): - continue - methname = 'emit_op_%s' % key - if hasattr(Method, methname): - func = getattr(Method, methname).im_func - else: - instrlist = opcodes.opcodes[key] - func = render_op(methname, instrlist) - operations[value] = func - return operations - -def is_raising_op(instrlist): - return len(instrlist) == 1 and isinstance(instrlist[0], opcodes.MapException) - -def render_op(methname, instrlist): - if is_raising_op(instrlist): - return render_raising_op(methname, instrlist) - lines = [] - for instr in instrlist: - if instr == opcodes.PushAllArgs: - lines.append('self.push_all_args(op)') - elif instr == opcodes.StoreResult: - lines.append('self.store_result(op)') - elif isinstance(instr, opcodes.PushArg): - lines.append('self.push_arg(op, %d)' % instr.n) - elif instr == 'ldc.r8 0': - lines.append('Utils.Emit_Ldc_R8(self.il, 0.0)') - else: - assert isinstance(instr, str), 'unknown instruction %s' % instr - if instr.startswith('call '): - signature = instr[len('call '):] - renderCall(lines, signature) - else: - attrname = opcode2attrname(instr) - lines.append('self.il.Emit(OpCodes.%s)' % attrname) - body = py.code.Source('\n'.join(lines)) - src = body.putaround('def %s(self, op):' % methname) - dic = {'OpCodes': OpCodes, - 'System': System, - 'Utils': Utils, - 'dotnet': dotnet} - exec src.compile() in dic - return dic[methname] - -def parse_exctype(exctype): - assert exctype.startswith('[mscorlib]') - return exctype[len('[mscorlib]'):] - - -def render_raising_op(methname, instrlist): - value = instrlist[0] - exctypes = [parse_exctype(exctype) for exctype, _ in value.mapping] - exctypes = ['dotnet.typeof(%s)' % exctype for exctype in exctypes] - is_ovf = (exctypes == ['dotnet.typeof(System.OverflowException)']) - impl_func = render_op(methname + '_impl', value.instr) - if not impl_func: - return - if is_ovf: - src = py.code.Source(""" - def %s(self, op): - self.emit_ovf_op(op, impl_func) - """ % (methname,)) - else: - src = py.code.Source(""" - def %s(self, op): - exctypes = [%s] - self.emit_raising_op(op, impl_func, exctypes) - """ % (methname, ', '.join(exctypes))) - dic = {'System': System, - 'dotnet': dotnet, - 'impl_func': impl_func} - exec src.compile() in dic - return dic[methname] - -def opcode2attrname(opcode): - if opcode == 'ldc.r8 0': - return 'Ldc_R8, 0' # XXX this is a hack - if opcode == 'ldc.i8 0': - return 'Ldc_I8, 0' # XXX this is a hack - parts = map(str.capitalize, opcode.split('.')) - return '_'.join(parts) - -def renderCall(body, signature): - # signature is like this: - # int64 class 
[mscorlib]System.Foo::Bar(int64, int32) - - typenames = { - 'int32': 'System.Int32', - 'int64': 'System.Int64', - 'float64': 'System.Double', - } - - restype, _, signature = signature.split(' ', 3) - assert signature.startswith('[mscorlib]'), 'external assemblies '\ - 'not supported' - signature = signature[len('[mscorlib]'):] - typename, signature = signature.split('::') - methname, signature = signature.split('(') - assert signature.endswith(')') - params = signature[:-1].split(',') - params = map(str.strip, params) - params = [typenames.get(p, p) for p in params] - params = ['dotnet.typeof(%s)' % p for p in params] - - body.append("t = System.Type.GetType('%s')" % typename) - body.append("params = dotnet.init_array(System.Type, %s)" % ', '.join(params)) - body.append("methinfo = t.GetMethod('%s', params)" % methname) - body.append("self.il.Emit(OpCodes.Call, methinfo)") - -Method.operations = make_operation_list() diff --git a/rpython/jit/backend/cli/methodfactory.py b/rpython/jit/backend/cli/methodfactory.py deleted file mode 100644 --- a/rpython/jit/backend/cli/methodfactory.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -from rpython.translator.cli.dotnet import CLR -from rpython.translator.cli import dotnet -System = CLR.System -Utils = CLR.pypy.runtime.Utils -AutoSaveAssembly = CLR.pypy.runtime.AutoSaveAssembly -MethodAttributes = System.Reflection.MethodAttributes -TypeAttributes = System.Reflection.TypeAttributes - -class AbstractMethodWrapper: - - def get_il_generator(self): - raise NotImplementedError - - def create_delegate(self, delegatetype, consts): - raise NotImplementedError - -class DynamicMethodWrapper(AbstractMethodWrapper): - - def __init__(self, name, res, args): - self.dynmeth = Utils.CreateDynamicMethod(name, res, args) - - def get_il_generator(self): - return self.dynmeth.GetILGenerator() - - def create_delegate(self, delegatetype, consts): - return self.dynmeth.CreateDelegate(delegatetype, consts) - - -# the assemblyData singleton contains the information about the -# assembly we are currently writing to -class AssemblyData: - assembly = None - name = None - methcount = 0 - - def is_enabled(self): - if self.name is None: - name = os.environ.get('PYPYJITLOG') - if name is None: - name = '' - self.name = name - return bool(self.name) - - def create(self): - assert self.is_enabled() - if self.assembly is None: - name = self.name - self.auto_save_assembly = AutoSaveAssembly.Create(name) - self.assembly = self.auto_save_assembly.GetAssemblyBuilder() - self.module = self.assembly.DefineDynamicModule(name) - -assemblyData = AssemblyData() - - -class AssemblyMethodWrapper(AbstractMethodWrapper): - - def __init__(self, name, res, args): - module = assemblyData.module - name = '%s_%d' % (name, assemblyData.methcount) - #self.name = name - assemblyData.methcount += 1 - self.typeBuilder = AutoSaveAssembly.DefineType(module, name) - self.meth = AutoSaveAssembly.DefineMethod(self.typeBuilder, - "invoke", res, args) - - - def get_il_generator(self): - return self.meth.GetILGenerator() - - def create_delegate(self, delegatetype, consts): - t = self.typeBuilder.CreateType() - methinfo = t.GetMethod("invoke") -## if self.name == 'Loop1(r0)_1': -## assemblyData.auto_save_assembly.Save() - return System.Delegate.CreateDelegate(delegatetype, - consts, - methinfo) - -def get_method_wrapper(name, res, args): - if assemblyData.is_enabled(): - assemblyData.create() - return AssemblyMethodWrapper(name, res, args) - else: - return DynamicMethodWrapper(name, res, args) - diff --git 
a/rpython/jit/backend/cli/runner.py b/rpython/jit/backend/cli/runner.py deleted file mode 100644 --- a/rpython/jit/backend/cli/runner.py +++ /dev/null @@ -1,532 +0,0 @@ -from rpython.tool.pairtype import extendabletype -from rpython.rtyper.ootypesystem import ootype -from rpython.rlib.objectmodel import we_are_translated -from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import AbstractDescr, AbstractMethDescr -from rpython.jit.metainterp.history import AbstractFailDescr, LoopToken -from rpython.jit.metainterp.history import Box, BoxInt, BoxObj, ConstObj, Const -from rpython.jit.metainterp import executor -from rpython.jit.metainterp.resoperation import rop, opname -from rpython.jit.backend import model -from rpython.jit.backend.llgraph.runner import KeyManager -from rpython.translator.cli import dotnet -from rpython.translator.cli.dotnet import CLR -from rpython.jit.metainterp.typesystem import oohelper - -System = CLR.System -OpCodes = System.Reflection.Emit.OpCodes -InputArgs = CLR.pypy.runtime.InputArgs -cpypyString = dotnet.classof(CLR.pypy.runtime.String) - -LoopToken.cliloop = None -AbstractFailDescr._loop_token = None -AbstractFailDescr._guard_op = None - -class CliLoop(object): - - def __init__(self, name, inputargs, operations): - self.name = name - self.inputargs = inputargs - self.operations = operations - self.guard2ops = {} # guard_op --> (inputargs, operations) - self.funcbox = None - self.methcount = 0 - - def get_fresh_cli_name(self): - name = '%s(r%d)' % (self.name, self.methcount) - self.methcount += 1 - return name - - -class CliCPU(model.AbstractCPU): - - supports_floats = True - ts = oohelper - - def __init__(self, rtyper, stats, opts=None, translate_support_code=False, - mixlevelann=None, gcdescr=None): - model.AbstractCPU.__init__(self) - self.rtyper = rtyper - if rtyper: - assert rtyper.type_system.name == "ootypesystem" - self.loopcount = 0 - self.stats = stats - self.translate_support_code = translate_support_code - self.inputargs = None - self.failing_ops = [] # index --> op - self.ll_ovf_exc = self._get_prebuilt_exc(OverflowError) - self.ll_zero_exc = self._get_prebuilt_exc(ZeroDivisionError) - - def _get_prebuilt_exc(self, cls): - if self.rtyper is None: - return System.Exception() - else: - bk = self.rtyper.annotator.bookkeeper - clsdef = bk.getuniqueclassdef(cls) - return self.rtyper.exceptiondata.get_standard_ll_exc_instance( - self.rtyper, clsdef) - - def get_inputargs(self): - if self.inputargs is None: - self.inputargs = InputArgs() - return self.inputargs - - @staticmethod - def calldescrof(FUNC, ARGS, RESULT, extrainfo=None): - return StaticMethDescr.new(FUNC, ARGS, RESULT, extrainfo) - - @staticmethod - def methdescrof(SELFTYPE, methname): - if SELFTYPE in (ootype.String, ootype.Unicode): - return StringMethDescr.new(SELFTYPE, methname) - return MethDescr.new(SELFTYPE, methname) - - @staticmethod - def typedescrof(TYPE): - return TypeDescr.new(TYPE) - - @staticmethod - def arraydescrof(A): - assert isinstance(A, ootype.Array) - TYPE = A.ITEM - return TypeDescr.new(TYPE) - - @staticmethod - def fielddescrof(T, fieldname): - T1, _ = T._lookup_field(fieldname) - return FieldDescr.new(T1, fieldname) - - def typedescr2classbox(self, descr): - assert isinstance(descr, TypeDescr) - return ConstObj(ootype.cast_to_object(descr.ooclass)) - - # ---------------------- - - def _attach_token_to_faildescrs(self, token, operations): - for op in operations: - if op.is_guard(): - descr = op.getdescr() - assert isinstance(descr, 
AbstractFailDescr) - descr._loop_token = token - descr._guard_op = op - - def compile_loop(self, inputargs, operations, looptoken): - from rpython.jit.backend.cli.method import Method, ConstFunction - name = 'Loop%d' % self.loopcount - self.loopcount += 1 - cliloop = CliLoop(name, inputargs, operations) - looptoken.cliloop = cliloop - cliloop.funcbox = ConstFunction(cliloop.name) - self._attach_token_to_faildescrs(cliloop, operations) - meth = Method(self, cliloop) - cliloop.funcbox.holder.SetFunc(meth.compile()) - - def compile_bridge(self, faildescr, inputargs, operations): - from rpython.jit.backend.cli.method import Method - op = faildescr._guard_op - token = faildescr._loop_token - token.guard2ops[op] = (inputargs, operations) - self._attach_token_to_faildescrs(token, operations) - meth = Method(self, token) - token.funcbox.holder.SetFunc(meth.compile()) - return token - - def execute_token(self, looptoken): - cliloop = looptoken.cliloop - func = cliloop.funcbox.holder.GetFunc() - func(self.get_inputargs()) - op = self.failing_ops[self.inputargs.get_failed_op()] - return op.getdescr() - - def set_future_value_int(self, index, intvalue): - self.get_inputargs().set_int(index, intvalue) - - def set_future_value_float(self, index, intvalue): - self.get_inputargs().set_float(index, intvalue) - - def set_future_value_ref(self, index, objvalue): - obj = dotnet.cast_to_native_object(objvalue) - self.get_inputargs().set_obj(index, obj) - - def get_latest_value_int(self, index): - return self.get_inputargs().get_int(index) - - def get_latest_value_float(self, index): - return self.get_inputargs().get_float(index) - - def get_latest_value_ref(self, index): - obj = self.get_inputargs().get_obj(index) - return dotnet.cast_from_native_object(obj) - - def get_exception(self): - exc_value = self.get_inputargs().get_exc_value() - if exc_value: - exc_obj = dotnet.cast_from_native_object(exc_value) - exc_inst = ootype.cast_from_object(ootype.ROOT, exc_obj) - cls = ootype.classof(exc_value) - return ootype.cast_to_object(cls) - return ootype.cast_to_object(ootype.nullruntimeclass) - - def get_exc_value(self): - exc_value = self.get_inputargs().get_exc_value() - if exc_value: - return dotnet.cast_from_native_object(exc_value) - else: - return ootype.NULL - - def clear_exception(self): - self.get_inputargs().set_exc_value(None) - - def get_overflow_error(self): - exc_type = ootype.cast_to_object(ootype.classof(self.ll_ovf_exc)) - exc_value = ootype.cast_to_object(self.ll_ovf_exc) - return exc_type, exc_value - - def get_zero_division_error(self): - exc_type = ootype.cast_to_object(ootype.classof(self.ll_zero_exc)) - exc_value = ootype.cast_to_object(self.ll_zero_exc) - return exc_type, exc_value - - def set_overflow_error(self): - exc_obj = ootype.cast_to_object(self.ll_ovf_exc) - exc_value = dotnet.cast_to_native_object(exc_obj) - self.get_inputargs().set_exc_value(exc_value) - - def set_zero_division_error(self): - exc_obj = ootype.cast_to_object(self.ll_zero_exc) - exc_value = dotnet.cast_to_native_object(exc_obj) - self.get_inputargs().set_exc_value(exc_value) - - # ---------------------- - - def do_new_with_vtable(self, classbox): - cls = classbox.getref_base() - typedescr = self.class_sizes[cls] - return typedescr.create() - - def do_new_array(self, lengthbox, typedescr): - assert isinstance(typedescr, TypeDescr) - return typedescr.create_array(lengthbox) - - def do_runtimenew(self, classbox): - classobj = classbox.getref(ootype.Class) - res = ootype.runtimenew(classobj) - return 
BoxObj(ootype.cast_to_object(res)) - - def do_instanceof(self, instancebox, typedescr): - assert isinstance(typedescr, TypeDescr) - return typedescr.instanceof(instancebox) - - def do_getfield_gc(self, instancebox, fielddescr): - assert isinstance(fielddescr, FieldDescr) - assert fielddescr.getfield is not None - return fielddescr.getfield(instancebox) - - def do_setfield_gc(self, instancebox, newvaluebox, fielddescr): - assert isinstance(fielddescr, FieldDescr) - assert fielddescr.setfield is not None - return fielddescr.setfield(instancebox, newvaluebox) - - def do_call(self, args, calldescr): - assert isinstance(calldescr, StaticMethDescr) - funcbox, args = args[0], args[1:] - self.clear_exception() - try: - return calldescr.callfunc(funcbox, args) - except Exception, e: - exc_value = self._cast_instance_to_native_obj(e) - self.get_inputargs().set_exc_value(exc_value) - return calldescr.get_errbox() - - def _cast_instance_to_native_obj(self, e): - from rpython.rtyper.annlowlevel import cast_instance_to_base_obj - inst = cast_instance_to_base_obj(e) # SomeOOInstance - obj = ootype.cast_to_object(inst) # SomeOOObject - return dotnet.cast_to_native_object(obj) # System.Object - - def do_oosend(self, args, descr): - assert isinstance(descr, MethDescr) - assert descr.callmeth is not None - selfbox = args[0] - argboxes = args[1:] - return descr.callmeth(selfbox, argboxes) - - def do_getarrayitem_gc(self, arraybox, indexbox, descr): - assert isinstance(descr, TypeDescr) - return descr.getarrayitem(arraybox, indexbox) - - def do_setarrayitem_gc(self, arraybox, indexbox, newvaluebox, descr): - assert isinstance(descr, TypeDescr) - descr.setarrayitem(arraybox, indexbox, newvaluebox) - - def do_arraylen_gc(self, arraybox, descr): - assert isinstance(descr, TypeDescr) - return descr.getarraylength(arraybox) - -# ---------------------------------------------------------------------- -key_manager = KeyManager() - -descr_cache = {} -class DescrWithKey(AbstractDescr): - key = -1 - - @classmethod - def new(cls, *args): - 'NOT_RPYTHON' - key = (cls, args) - try: - return descr_cache[key] - except KeyError: - res = cls(*args) - descr_cache[key] = res - return res - - - def __init__(self, key): - self.key = key_manager.getkey(key) - - def sort_key(self): - return self.key - - def short_repr(self): - return '' - - def repr_of_descr(self): - return self.short_repr() - - -def get_class_for_type(T): - if T is ootype.Void: - return ootype.nullruntimeclass - elif T is ootype.Signed: - return dotnet.classof(System.Int32) - elif T is ootype.Unsigned: - return dotnet.classof(System.UInt32) - elif T is ootype.Bool: - return dotnet.classof(System.Boolean) - elif T is ootype.Float: - return dotnet.classof(System.Double) -## elif T is ootype.String: -## return dotnet.classof(System.String) - elif T in (ootype.Char, ootype.UniChar): - return dotnet.classof(System.Char) - elif isinstance(T, ootype.OOType): - return ootype.runtimeClass(T) - else: - assert False - -class TypeDescr(DescrWithKey): - - def __init__(self, TYPE): - DescrWithKey.__init__(self, TYPE) - from rpython.jit.backend.llgraph.runner import boxresult - from rpython.jit.metainterp.warmstate import unwrap - ARRAY = ootype.Array(TYPE) - def create(): - if isinstance(TYPE, ootype.OOType): - return boxresult(TYPE, ootype.new(TYPE)) - return None - def create_array(lengthbox): - n = lengthbox.getint() - return boxresult(ARRAY, ootype.oonewarray(ARRAY, n)) - def getarrayitem(arraybox, ibox): - array = arraybox.getref(ARRAY) - i = ibox.getint() - if TYPE is not 
ootype.Void: - return boxresult(TYPE, array.ll_getitem_fast(i)) - def setarrayitem(arraybox, ibox, valuebox): - array = arraybox.getref(ARRAY) - i = ibox.getint() - value = unwrap(TYPE, valuebox) - array.ll_setitem_fast(i, value) - def getarraylength(arraybox): - array = arraybox.getref(ARRAY) - return boxresult(ootype.Signed, array.ll_length()) - def instanceof(box): - if isinstance(TYPE, ootype.Instance): - obj = box.getref(ootype.ROOT) - return BoxInt(ootype.instanceof(obj, TYPE)) - return None - self.create = create - self.create_array = create_array - self.getarrayitem = getarrayitem - self.setarrayitem = setarrayitem - self.getarraylength = getarraylength - self.instanceof = instanceof - self.ooclass = get_class_for_type(TYPE) - self.typename = TYPE._short_name() - self._is_array_of_pointers = (history.getkind(TYPE) == 'ref') - self._is_array_of_floats = (history.getkind(TYPE) == 'float') - - def is_array_of_pointers(self): - # for arrays, TYPE is the type of the array item. - return self._is_array_of_pointers - - def is_array_of_floats(self): - # for arrays, TYPE is the type of the array item. - return self._is_array_of_floats - - def get_clitype(self): - return dotnet.class2type(self.ooclass) - - def get_array_clitype(self): - return self.get_clitype().MakeArrayType() - - def get_constructor_info(self): - clitype = self.get_clitype() - return clitype.GetConstructor(dotnet.new_array(System.Type, 0)) - - def short_repr(self): - return self.typename - -class StaticMethDescr(DescrWithKey): - - callfunc = None - funcclass = ootype.nullruntimeclass - has_result = False - - def __init__(self, FUNC, ARGS, RESULT, extrainfo=None): - DescrWithKey.__init__(self, (FUNC, ARGS, RESULT)) - from rpython.jit.backend.llgraph.runner import boxresult, make_getargs - getargs = make_getargs(FUNC.ARGS) - def callfunc(funcbox, argboxes): - funcobj = funcbox.getref(FUNC) - funcargs = getargs(argboxes) - res = funcobj(*funcargs) - if RESULT is not ootype.Void: - return boxresult(RESULT, res) - self.callfunc = callfunc - self.funcclass = dotnet.classof(FUNC) - self.has_result = (FUNC.RESULT != ootype.Void) - self.extrainfo = extrainfo - if RESULT is ootype.Void: - def get_errbox(): - return None - elif isinstance(RESULT, ootype.OOType): - def get_errbox(): - return BoxObj() - else: - def get_errbox(): - return BoxInt() - self.get_errbox = get_errbox - - def get_delegate_clitype(self): - return dotnet.class2type(self.funcclass) - - def get_meth_info(self): - clitype = self.get_delegate_clitype() - return clitype.GetMethod('Invoke') - - def get_extra_info(self): - return self.extrainfo - - -class MethDescr(AbstractMethDescr): - - callmeth = None - selfclass = ootype.nullruntimeclass - methname = '' - has_result = False - key = -1 - - new = classmethod(DescrWithKey.new.im_func) - - def __init__(self, SELFTYPE, methname): - from rpython.jit.backend.llgraph.runner import boxresult, make_getargs - _, meth = SELFTYPE._lookup(methname) - METH = ootype.typeOf(meth) - getargs = make_getargs(METH.ARGS) - def callmeth(selfbox, argboxes): - selfobj = selfbox.getref(SELFTYPE) - meth = getattr(selfobj, methname) - methargs = getargs(argboxes) - res = meth(*methargs) - if METH.RESULT is not ootype.Void: - return boxresult(METH.RESULT, res) - self.callmeth = callmeth - self.selfclass = ootype.runtimeClass(SELFTYPE) - self.methname = methname - self.has_result = (METH.RESULT != ootype.Void) - self.key = key_manager.getkey((SELFTYPE, methname)) - - def sort_key(self): - return self.key - - def get_self_clitype(self): - return 
dotnet.class2type(self.selfclass) - - def get_meth_info(self): - clitype = self.get_self_clitype() - return clitype.GetMethod(self.methname+'') - - def get_call_opcode(self): - return OpCodes.Callvirt - - def repr_of_descr(self): - return "'%s'" % self.methname - -class StringMethDescr(MethDescr): - - def get_meth_info(self): - clitype = dotnet.class2type(cpypyString) - return clitype.GetMethod(self.methname+'') - - def get_call_opcode(self): - return OpCodes.Call - - -class FieldDescr(DescrWithKey): - - getfield = None - setfield = None - selfclass = ootype.nullruntimeclass - fieldname = '' - _is_pointer_field = False - _is_float_field = False - - def __init__(self, TYPE, fieldname): - DescrWithKey.__init__(self, (TYPE, fieldname)) - from rpython.jit.backend.llgraph.runner import boxresult - from rpython.jit.metainterp.warmstate import unwrap - _, T = TYPE._lookup_field(fieldname) - def getfield(objbox): - obj = objbox.getref(TYPE) - value = getattr(obj, fieldname) - return boxresult(T, value) - def setfield(objbox, valuebox): - obj = objbox.getref(TYPE) - value = unwrap(T, valuebox) - setattr(obj, fieldname, value) - - self.getfield = getfield - self.setfield = setfield - self.selfclass = ootype.runtimeClass(TYPE) - self.fieldname = fieldname - self.key = key_manager.getkey((TYPE, fieldname)) - self._is_pointer_field = (history.getkind(T) == 'ref') - self._is_float_field = (history.getkind(T) == 'float') - - def is_pointer_field(self): - return self._is_pointer_field - - def is_float_field(self): - return self._is_float_field - - def equals(self, other): - assert isinstance(other, FieldDescr) - return self.key == other.key - - def get_self_clitype(self): - return dotnet.class2type(self.selfclass) - - def get_field_info(self): - clitype = self.get_self_clitype() - return clitype.GetField(self.fieldname+'') - - def short_repr(self): - return "'%s'" % self.fieldname - -CPU = CliCPU - -import rpython.jit.metainterp.executor -rpython.jit.metainterp.executor.make_execute_list(CPU) diff --git a/rpython/jit/backend/cli/test/__init__.py b/rpython/jit/backend/cli/test/__init__.py deleted file mode 100644 diff --git a/rpython/jit/backend/cli/test/conftest.py b/rpython/jit/backend/cli/test/conftest.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/conftest.py +++ /dev/null @@ -1,4 +0,0 @@ -import py - -def pytest_ignore_collect(path): - return True diff --git a/rpython/jit/backend/cli/test/test_basic.py b/rpython/jit/backend/cli/test/test_basic.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_basic.py +++ /dev/null @@ -1,59 +0,0 @@ -import py -from rpython.jit.backend.cli.runner import CliCPU -from rpython.jit.metainterp.test import support, test_ajit - -class CliJitMixin(suport.OOJitMixin): - CPUClass = CliCPU - def setup_class(cls): - from rpython.translator.cli.support import PythonNet - PythonNet.System # possibly raises Skip - -class TestBasic(CliJitMixin, test_ajit.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_basic.py - - def skip(self): - py.test.skip("works only after translation") - - test_string = skip - test_chr2str = skip - test_unicode = skip - test_residual_call = skip - test_constant_across_mp = skip - test_format = skip - test_getfield = skip - test_getfield_immutable = skip - test_print = skip - test_bridge_from_interpreter_2 = skip - test_bridge_from_interpreter_3 = skip - test_bridge_leaving_interpreter_5 = skip - test_instantiate_classes = skip - test_zerodivisionerror = skip - test_isinstance = skip - 
test_isinstance_2 = skip - test_oois = skip - test_oostring_instance = skip - test_long_long = skip - test_free_object = skip - test_stopatxpolicy = skip - test_residual_call_pure = skip - test_div_overflow = skip - test_subclassof = skip - test_assert_isinstance = skip - test_dont_look_inside = skip - test_setfield_bool = skip - test_instantiate_does_not_call = skip - test_listcomp = skip - test_tuple_immutable = skip - test_oosend_look_inside_only_one = skip - test_residual_external_call = skip - test_merge_guardclass_guardvalue = skip - test_merge_guardnonnull_guardclass = skip - test_merge_guardnonnull_guardvalue = skip - test_merge_guardnonnull_guardvalue_2 = skip - test_merge_guardnonnull_guardclass_guardvalue = skip - test_residual_call_doesnt_lose_info = skip - test_oohash = skip - test_identityhash = skip - test_guard_isnull_nonnull = skip - test_r_dict = skip diff --git a/rpython/jit/backend/cli/test/test_descr.py b/rpython/jit/backend/cli/test/test_descr.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_descr.py +++ /dev/null @@ -1,24 +0,0 @@ -from rpython.rtyper.ootypesystem import ootype -from rpython.jit.backend.cli.runner import CliCPU - - -def test_fielddescr_ootype(): - A = ootype.Instance("A", ootype.ROOT, {"foo": ootype.Signed}) - B = ootype.Instance("B", A) - descr1 = CliCPU.fielddescrof(A, "foo") - descr2 = CliCPU.fielddescrof(B, "foo") - assert descr1 is descr2 - -def test_call_descr_extra_info(): - FUNC = ootype.StaticMethod([], ootype.Signed) - ARGS = () - descr1 = CliCPU.calldescrof(FUNC, ARGS, ootype.Signed, "hello") - extrainfo = descr1.get_extra_info() - assert extrainfo == "hello" - - descr2 = CliCPU.calldescrof(FUNC, ARGS, ootype.Signed, "hello") - assert descr2 is descr1 - - descr3 = CliCPU.calldescrof(FUNC, ARGS, ootype.Signed) - assert descr3 is not descr1 - assert descr3.get_extra_info() is None diff --git a/rpython/jit/backend/cli/test/test_exception.py b/rpython/jit/backend/cli/test/test_exception.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_exception.py +++ /dev/null @@ -1,28 +0,0 @@ -import py -from rpython.jit.metainterp.test import test_exception -from rpython.jit.backend.cli.test.test_basic import CliJitMixin - - -class TestException(CliJitMixin, test_exception.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_exception.py - - def skip(self): - py.test.skip("works only after translation") - - test_simple = skip - test_bridge_from_guard_exception = skip - test_bridge_from_guard_no_exception = skip - test_four_levels_checks = skip - test_exception_from_outside = skip - test_exception_from_outside_2 = skip - test_exception_two_cases = skip - test_exception_two_cases_2 = skip - test_exception_four_cases = skip - test_exception_later = skip - test_exception_and_then_no_exception = skip - test_raise_through = skip - test_raise_through_wrong_exc = skip - test_raise_through_wrong_exc_2 = skip - test_bridge_from_interpreter_exc = skip - test_bridge_from_interpreter_exc_2 = skip diff --git a/rpython/jit/backend/cli/test/test_list.py b/rpython/jit/backend/cli/test/test_list.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_list.py +++ /dev/null @@ -1,15 +0,0 @@ -import py -from rpython.jit.metainterp.test import test_list -from rpython.jit.backend.cli.test.test_basic import CliJitMixin - - -class TestVlist(CliJitMixin, test_list.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_list.py - - def skip(self): - py.test.skip("works only 
after translation") - - test_list_pass_around = skip - test_cannot_be_virtual = skip - test_ll_fixed_setitem_fast = skip diff --git a/rpython/jit/backend/cli/test/test_loop.py b/rpython/jit/backend/cli/test/test_loop.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_loop.py +++ /dev/null @@ -1,22 +0,0 @@ -import py -from rpython.jit.metainterp.test import test_loop -from rpython.jit.backend.cli.test.test_basic import CliJitMixin - - -class TestLoop(CliJitMixin, test_loop.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_loop.py - - def skip(self): - py.test.skip("works only after translation") - - test_loop_with_two_paths = skip - test_interp_many_paths = skip - test_interp_many_paths_2 = skip - test_adapt_bridge_to_merge_point = skip - test_outer_and_inner_loop = skip - test_path_with_operations_not_from_start_2 = skip - test_loop_unicode = skip - test_loop_string = skip - test_loop_with_delayed_setfield = skip - diff --git a/rpython/jit/backend/cli/test/test_runner.py b/rpython/jit/backend/cli/test/test_runner.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_runner.py +++ /dev/null @@ -1,76 +0,0 @@ -import py -from rpython.jit.backend.cli.runner import CliCPU -from rpython.jit.backend.test.runner_test import OOtypeBackendTest - -class FakeStats(object): - pass - -# ____________________________________________________________ - -class CliJitMixin(object): - - typesystem = 'ootype' - CPUClass = CliCPU - - # for the individual tests see - # ====> ../../test/runner_test.py - - def setup_class(cls): - cls.cpu = cls.CPUClass(rtyper=None, stats=FakeStats()) - - -class TestRunner(CliJitMixin, OOtypeBackendTest): - avoid_instances = True - - def skip(self): - py.test.skip("not supported in non-translated version") - - test_passing_guard_class = skip # GUARD_CLASS - test_failing_guard_class = skip # GUARD_CLASS - test_call = skip - test_field = skip - test_field_basic = skip - test_ooops = skip - test_jump = skip - - def test_unused_result_float(self): - py.test.skip('fixme! 
max 32 inputargs so far') - - def test_ovf_operations(self, reversed=False): - self.skip() - - def test_do_unicode_basic(self): - py.test.skip('fixme!') - - def test_unicode_basic(self): - py.test.skip('fixme!') - - def test_backends_dont_keep_loops_alive(self): - pass # the cli backend DOES keep loops alive - -def test_pypycliopt(): - import os - from rpython.jit.backend.cli.method import Method - - def getmeth(value): - oldenv = os.environ.get('PYPYJITOPT') - os.environ['PYPYJITOPT'] = value - meth = Method.__new__(Method) # evil hack not to call __init__ - meth.setoptions() - if oldenv: - os.environ['PYPYJITOPT'] = oldenv - else: - del os.environ['PYPYJITOPT'] - return meth - - meth = getmeth('') - assert meth.debug == Method.debug - assert meth.tailcall == Method.tailcall - - meth = getmeth('debug -tailcall') - assert meth.debug - assert not meth.tailcall - - meth = getmeth('+debug +tailcall') - assert meth.debug - assert meth.tailcall diff --git a/rpython/jit/backend/cli/test/test_zrpy_basic.py b/rpython/jit/backend/cli/test/test_zrpy_basic.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_zrpy_basic.py +++ /dev/null @@ -1,36 +0,0 @@ -import py -from rpython.jit.backend.cli.runner import CliCPU -from rpython.jit.backend.test.support import CliCompiledMixin -from rpython.jit.metainterp.test import test_basic - -class CliTranslatedJitMixin(CliCompiledMixin): - CPUClass = CliCPU - - def meta_interp(self, *args, **kwds): - from rpython.rlib.jit import OPTIMIZER_SIMPLE - kwds['optimizer'] = OPTIMIZER_SIMPLE - return CliCompiledMixin.meta_interp(self, *args, **kwds) - - -class TestBasic(CliTranslatedJitMixin, test_basic.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_basic.py - - def mono_bug(self): - py.test.skip('mono bug?') - - def skip(self): - py.test.skip('in-progress') - - test_stopatxpolicy = mono_bug - - test_print = skip - test_bridge_from_interpreter = skip - test_bridge_from_interpreter_2 = skip - test_free_object = skip - - def test_bridge_from_interpreter_4(self): - pass # not a translation test - - def test_we_are_jitted(self): - py.test.skip("it seems to fail even with the x86 backend, didn't investigate the problem") diff --git a/rpython/jit/backend/cli/test/test_zrpy_exception.py b/rpython/jit/backend/cli/test/test_zrpy_exception.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_zrpy_exception.py +++ /dev/null @@ -1,11 +0,0 @@ -import py -from rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin -from rpython.jit.metainterp.test import test_exception - - -class TestException(CliTranslatedJitMixin, test_exception.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_exception.py - - pass - diff --git a/rpython/jit/backend/cli/test/test_zrpy_list.py b/rpython/jit/backend/cli/test/test_zrpy_list.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_zrpy_list.py +++ /dev/null @@ -1,10 +0,0 @@ -import py -from rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin -from rpython.jit.metainterp.test import test_list - - -class TestVList(CliTranslatedJitMixin, test_list.TestOOtype): - # for the individual tests see - # ====> ../../../metainterp/test/test_list.py - - pass diff --git a/rpython/jit/backend/cli/test/test_zrpy_loop.py b/rpython/jit/backend/cli/test/test_zrpy_loop.py deleted file mode 100644 --- a/rpython/jit/backend/cli/test/test_zrpy_loop.py +++ /dev/null @@ -1,19 +0,0 @@ -import py -from 
rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin
-from rpython.jit.metainterp.test import test_loop
-
-
-class TestLoop(CliTranslatedJitMixin, test_loop.TestOOtype):
-    # for the individual tests see
-    # ====> ../../../metainterp/test/test_loop.py
-
-    def skip(self):
-        py.test.skip('in-progress')
-
-    def test_interp_many_paths(self):
-        pass # no chance to pass it after translation, because it passes
-             # non-int arguments to the function
-
-    def test_interp_many_paths_2(self):
-        pass # see above
-
diff --git a/rpython/jit/backend/cli/test/test_zrpy_send.py b/rpython/jit/backend/cli/test/test_zrpy_send.py
deleted file mode 100644
--- a/rpython/jit/backend/cli/test/test_zrpy_send.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import py
-from rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin
-from rpython.jit.metainterp.test import test_send
-
-
-class TestSend(CliTranslatedJitMixin, test_send.TestOOtype):
-    # for the individual tests see
-    # ====> ../../../metainterp/test/test_send.py
-
-    def test_recursive_call_to_portal_from_blackhole(self):
-        py.test.skip('string return values are not supported')
-
-    test_oosend_guard_failure = py.test.mark.xfail(
-        test_send.TestOOtype.test_oosend_guard_failure.im_func)
-
-    test_oosend_guard_failure_2 = py.test.mark.xfail(
-        test_send.TestOOtype.test_oosend_guard_failure_2.im_func)
diff --git a/rpython/jit/backend/cli/test/test_zrpy_slist.py b/rpython/jit/backend/cli/test/test_zrpy_slist.py
deleted file mode 100644
--- a/rpython/jit/backend/cli/test/test_zrpy_slist.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import py
-py.test.skip('decide what to do')
-from rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin
-from rpython.jit.metainterp.test import test_slist
-
-
-class TestSList(CliTranslatedJitMixin, test_slist.TestOOtype):
-    # for the individual tests see
-    # ====> ../../../metainterp/test/test_slist.py
-    pass
diff --git a/rpython/jit/backend/cli/test/test_zrpy_virtualizable.py b/rpython/jit/backend/cli/test/test_zrpy_virtualizable.py
deleted file mode 100644
--- a/rpython/jit/backend/cli/test/test_zrpy_virtualizable.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import py
-from rpython.jit.backend.cli.test.test_zrpy_basic import CliTranslatedJitMixin
-from rpython.jit.metainterp.test import test_virtualizable
-
-
-class TestVirtualizable(CliTranslatedJitMixin, test_virtualizable.TestOOtype):
-    # for the individual tests see
-    # ====> ../../../metainterp/test/test_virtualizable.py
-
-    pass

From noreply at buildbot.pypy.org Sun Jul 7 14:22:23 2013
From: noreply at buildbot.pypy.org (rlamy)
Date: Sun, 7 Jul 2013 14:22:23 +0200 (CEST)
Subject: [pypy-commit] pypy kill-ootype: Fix rpython/jit tests
Message-ID: <20130707122223.F23401C0512@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: kill-ootype
Changeset: r65251:304d0b30077c
Date: 2013-07-07 05:15 +0200
http://bitbucket.org/pypy/pypy/changeset/304d0b30077c/

Log: Fix rpython/jit tests

diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py
--- a/rpython/jit/metainterp/test/test_warmstate.py
+++ b/rpython/jit/metainterp/test/test_warmstate.py
@@ -1,6 +1,5 @@
 from rpython.rtyper.test.test_llinterp import interpret
 from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi
-from rpython.rtyper.ootypesystem import ootype
 from rpython.rtyper.annlowlevel import llhelper
 from rpython.jit.metainterp.warmstate import wrap, unwrap, specialize_value
 from rpython.jit.metainterp.warmstate import equal_whatever, hash_whatever
@@ -77,16 +76,6 @@
     fn(42)
     interpret(fn, [42], type_system='lltype')
-def test_hash_equal_whatever_ootype():
-    def fn(c):
-        s1 = ootype.oostring("xy", -1)
-        s2 = ootype.oostring("x" + chr(c), -1)
-        assert (hash_whatever(ootype.typeOf(s1), s1) ==
-                hash_whatever(ootype.typeOf(s2), s2))
-        assert equal_whatever(ootype.typeOf(s1), s1, s2)
-    fn(ord('y'))
-    interpret(fn, [ord('y')], type_system='ootype')
-
 def test_make_jitcell_getter_default():
-
     class FakeJitDriverSD:

From noreply at buildbot.pypy.org Sun Jul 7 14:22:25 2013
From: noreply at buildbot.pypy.org (rlamy)
Date: Sun, 7 Jul 2013 14:22:25 +0200 (CEST)
Subject: [pypy-commit] pypy kill-ootype: Remove rpython.translator.cli
Message-ID: <20130707122225.6A2CE1C0512@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: kill-ootype
Changeset: r65252:1b0f73ac1adb
Date: 2013-07-07 13:46 +0200
http://bitbucket.org/pypy/pypy/changeset/1b0f73ac1adb/

Log: Remove rpython.translator.cli

diff too long, truncating to 2000 out of 9030 lines

diff --git a/rpython/translator/cli/__init__.py b/rpython/translator/cli/__init__.py
deleted file mode 100644
diff --git a/rpython/translator/cli/class_.py b/rpython/translator/cli/class_.py
deleted file mode 100644
--- a/rpython/translator/cli/class_.py
+++ /dev/null
@@ -1,159 +0,0 @@
-from rpython.rtyper.ootypesystem import ootype
-from rpython.translator.cli.node import Node
-from rpython.translator.cli.cts import CTS
-from rpython.translator.oosupport.constant import push_constant
-from rpython.translator.cli.ilgenerator import CLIBaseGenerator
-
-try:
-    set
-except NameError:
-    from sets import Set as set
-
-class Class(Node):
-    def __init__(self, db, INSTANCE, namespace, name):
-        self.db = db
-        self.cts = db.genoo.TypeSystem(db)
-        self.INSTANCE = INSTANCE
-        self.namespace = namespace
-        self.name = name
-
-    def dependencies(self):
-        if not self.is_root(self.INSTANCE):
-            self.db.pending_class(self.INSTANCE._superclass)
-
-    def __hash__(self):
-        return hash(self.INSTANCE)
-
-    def __eq__(self, other):
-        return self.INSTANCE == other.INSTANCE
-
-    def __ne__(self, other):
-        return not self == other
-
-    def is_root(INSTANCE):
-        return INSTANCE._superclass is None
-    is_root = staticmethod(is_root)
-
-    def get_name(self):
-        return self.name
-
-    def __repr__(self):
-        return '' % self.name
-
-    def get_base_class(self):
-        base_class = self.INSTANCE._superclass
-        if self.INSTANCE is self.db.genoo.EXCEPTION:
-            assert self.is_root(base_class)
-            return '[mscorlib]System.Exception'
-        if self.is_root(base_class):
-            return '[mscorlib]System.Object'
-        else:
-            return self.db.class_name(base_class)
-
-    def is_abstract(self):
-        return False # XXX
-
-        # if INSTANCE has an abstract method, the class is abstract
-        method_names = set()
-        for m_name, m_meth in self.INSTANCE._methods.iteritems():
-            if not hasattr(m_meth, 'graph'):
-                return True
-            method_names.add(m_name)
-
-        # if superclasses have abstract methods not overriden by
-        # INSTANCE, the class is abstract
-        abstract_method_names = set()
-        cls = self.INSTANCE._superclass
-        while cls is not None:
-            abstract_method_names.update(cls._methods.keys())
-            cls = cls._superclass
-        not_overriden = abstract_method_names.difference(method_names)
-        if not_overriden:
-            return True
-
-        return False
-
-    def render(self, ilasm):
-        if self.is_root(self.INSTANCE):
-            return
-
-        self.ilasm = ilasm
-        self.gen = CLIBaseGenerator(self.db, ilasm)
-
-        if self.namespace:
-            ilasm.begin_namespace(self.namespace)
-
-        ilasm.begin_class(self.name, self.get_base_class(),
abstract=self.is_abstract()) - for f_name, (f_type, f_default) in self.INSTANCE._fields.iteritems(): - cts_type = self.cts.lltype_to_cts(f_type) - f_name = self.cts.escape_name(f_name) - if cts_type != CTS.types.void: - ilasm.field(f_name, cts_type) - - self._ctor() - self._toString() - - for m_name, m_meth in self.INSTANCE._methods.iteritems(): - if hasattr(m_meth, 'graph'): - # if the first argument's type is not a supertype of - # this class it means that this method this method is - # not really used by the class: don't render it, else - # there would be a type mismatch. - args = m_meth.graph.getargs() - SELF = args[0].concretetype -## if not ootype.isSubclass(self.INSTANCE, SELF): -## continue - f = self.db.genoo.Function(self.db, m_meth.graph, m_name, is_method = True) - f.render(ilasm) - else: - # abstract method - METH = m_meth._TYPE - arglist = [(self.cts.lltype_to_cts(ARG), 'v%d' % i) - for i, ARG in enumerate(METH.ARGS) - if ARG is not ootype.Void] - returntype = self.cts.lltype_to_cts(METH.RESULT) - ilasm.begin_function(m_name, arglist, returntype, False, 'virtual') #, 'abstract') - ilasm.add_comment('abstract method') - if isinstance(METH.RESULT, ootype.OOType): - ilasm.opcode('ldnull') - else: - push_constant(self.db, METH.RESULT, METH.RESULT._defl(), self.gen) - ilasm.opcode('ret') - ilasm.end_function() - - ilasm.end_class() - - if self.namespace: - ilasm.end_namespace() - - def _ctor(self): - self.ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance') - self.ilasm.opcode('ldarg.0') - self.ilasm.call('instance void %s::.ctor()' % self.get_base_class()) - # set default values for fields - default_values = self.INSTANCE._fields.copy() - default_values.update(self.INSTANCE._overridden_defaults) - for f_name, (F_TYPE, f_default) in default_values.iteritems(): - if getattr(F_TYPE, '_is_value_type', False): - continue # we can't set it to null - INSTANCE_DEF, _ = self.INSTANCE._lookup_field(f_name) - cts_type = self.cts.lltype_to_cts(F_TYPE) - f_name = self.cts.escape_name(f_name) - if cts_type != CTS.types.void: - self.ilasm.opcode('ldarg.0') - push_constant(self.db, F_TYPE, f_default, self.gen) - class_name = self.db.class_name(INSTANCE_DEF) - self.ilasm.set_field((cts_type, class_name, f_name)) - - self.ilasm.opcode('ret') - self.ilasm.end_function() - - def _toString(self): - if ootype.isSubclass(self.INSTANCE, self.db.genoo.EXCEPTION): - return # don't override the default ToString, which prints a traceback - self.ilasm.begin_function('ToString', [], 'string', False, 'virtual', 'instance', 'default') - self.ilasm.opcode('ldarg.0') - self.ilasm.call('string class [pypylib]pypy.test.Result::InstanceToPython(object)') - self.ilasm.ret() - self.ilasm.end_function() - diff --git a/rpython/translator/cli/comparer.py b/rpython/translator/cli/comparer.py deleted file mode 100644 --- a/rpython/translator/cli/comparer.py +++ /dev/null @@ -1,67 +0,0 @@ -import types -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.cts import CTS -from rpython.translator.cli.node import Node - -IEQUALITY_COMPARER = 'class [mscorlib]System.Collections.Generic.IEqualityComparer`1<%s>' - -class EqualityComparer(Node): - count = 0 - - def __init__(self, db, KEY_TYPE, eq_args, hash_args): - self.db = db - self.cts = CTS(db) - self.KEY_TYPE = KEY_TYPE - self.key_type = self.cts.lltype_to_cts(KEY_TYPE) - self.eq_args = eq_args - self.hash_args = hash_args - self.name = 'EqualityComparer_%d' % EqualityComparer.count - EqualityComparer.count += 1 
- - def get_ctor(self): - return 'instance void %s::.ctor()' % self.name - - def render(self, ilasm): - self.ilasm = ilasm - IEqualityComparer = IEQUALITY_COMPARER % self.key_type - ilasm.begin_class(self.name, interfaces=[IEqualityComparer]) - self._ctor() - self._method('Equals', [(self.key_type, 'x'), (self.key_type, 'y')], - 'bool', self.eq_args) - self._method('GetHashCode', [(self.key_type, 'x')], 'int32', self.hash_args) - ilasm.end_class() - - def _ctor(self): - self.ilasm.begin_function('.ctor', [], 'void', False, 'specialname', 'rtspecialname', 'instance') - self.ilasm.opcode('ldarg.0') - self.ilasm.call('instance void [mscorlib]System.Object::.ctor()') - self.ilasm.opcode('ret') - self.ilasm.end_function() - - def _method(self, name, arglist, return_type, fn_args): - self.ilasm.begin_function(name, arglist, return_type, False, - 'final', 'virtual', 'hidebysig', 'newslot', - 'instance', 'default') - - if type(fn_args) == types.FunctionType: - assert len(fn_args.self_arg) <= 1 - if len(fn_args.self_arg) == 1: - assert fn_args.graph.getargs()[0].concretetype is ootype.Void - self._call_function(fn_args.graph, len(arglist)) - else: - fn, obj, method_name = fn_args - # fn is a Constant(StaticMethod) - if method_name.value is None: - self._call_function(fn.value.graph, len(arglist)) - else: - assert False, 'XXX' - - self.ilasm.end_function() - - def _call_function(self, graph, n_args): - self.db.pending_function(graph) - for arg in range(1, n_args+1): - self.ilasm.opcode('ldarg', arg) - signature = self.cts.graph_to_signature(graph) - self.ilasm.call(signature) - self.ilasm.opcode('ret') diff --git a/rpython/translator/cli/conftest.py b/rpython/translator/cli/conftest.py deleted file mode 100644 --- a/rpython/translator/cli/conftest.py +++ /dev/null @@ -1,18 +0,0 @@ -def pytest_addoption(parser): - group = parser.getgroup("pypy-cli options") - group.addoption('--source', action="store_true", dest="source", default=False, - help="only generate IL source, don't compile") - group.addoption('--wd', action="store_true", dest="wd", default=False, - help="store temporary files in the working directory") - group.addoption('--stdout', action="store_true", dest="stdout", default=False, - help="print the generated IL code to stdout, too") - group.addoption('--nostop', action="store_true", dest="nostop", default=False, - help="don't stop on warning. The generated IL code could not compile") - group.addoption('--nowrap', action="store_true", dest="nowrap", default=False, - help="don't wrap exceptions but let them to flow out of the entry point") - group.addoption('--verify', action="store_true", dest="verify", default=False, - help="check that compiled executables are verifiable") - group.addoption('--norun', action='store_true', dest="norun", default=False, - help="don't run the compiled executable") - group.addoption('--trace', action='store_true', dest='trace', default=False, - help='Trace execution of generated code') diff --git a/rpython/translator/cli/constant.py b/rpython/translator/cli/constant.py deleted file mode 100644 --- a/rpython/translator/cli/constant.py +++ /dev/null @@ -1,314 +0,0 @@ -""" -___________________________________________________________________________ -CLI Constants - -This module extends the oosupport/constant.py to be specific to the -CLI. 
Most of the code in this file is in the constant generators, which -determine how constants are stored and loaded (static fields, lazy -initialization, etc), but some constant classes have been overloaded or -extended to allow for special handling. - -The CLI implementation is broken into three sections: - -* Constant Generators: different generators implementing different - techniques for loading constants (Static fields, singleton fields, etc) - -* Mixins: mixins are used to add a few CLI-specific methods to each - constant class. Basically, any time I wanted to extend a base class - (such as AbstractConst or DictConst), I created a mixin, and then - mixed it in to each sub-class of that base-class. - -* Subclasses: here are the CLI specific classes. Eventually, these - probably wouldn't need to exist at all (the JVM doesn't have any, - for example), or could simply have empty bodies and exist to - combine a mixin and the generic base class. For now, though, they - contain the create_pointer() and initialize_data() routines. -""" - -from rpython.translator.oosupport.constant import \ - push_constant, WeakRefConst, StaticMethodConst, CustomDictConst, \ - ListConst, ClassConst, InstanceConst, RecordConst, DictConst, \ - BaseConstantGenerator, AbstractConst, ArrayConst -from rpython.translator.cli.ilgenerator import CLIBaseGenerator -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.comparer import EqualityComparer -from rpython.rtyper.lltypesystem import lltype -from rpython.translator.cli.cts import PYPY_DICT_OF_VOID, WEAKREF - -CONST_NAMESPACE = 'pypy.runtime' -CONST_CLASSNAME = 'Constants' -CONST_CLASS = '%s.%s' % (CONST_NAMESPACE, CONST_CLASSNAME) - -DEBUG_CONST_INIT = False -DEBUG_CONST_INIT_VERBOSE = False -SERIALIZE = False - -# ______________________________________________________________________ -# Constant Generators -# -# Different generators implementing different techniques for loading -# constants (Static fields, singleton fields, etc) - -class CLIConstantGenerator(BaseConstantGenerator): - """ - Base of all CLI constant generators. It implements the oosupport - constant generator in terms of the CLI interface. 
- """ - - def __init__(self, db): - BaseConstantGenerator.__init__(self, db) - self.cts = db.genoo.TypeSystem(db) - - def _begin_gen_constants(self, ilasm, all_constants): - self.ilasm = ilasm - self.begin_class() - gen = CLIBaseGenerator(self.db, ilasm) - return gen - - def _end_gen_constants(self, gen, numsteps): - - self.ilasm.begin_function('.cctor', [], 'void', False, 'static', - 'specialname', 'rtspecialname', 'default') - self.ilasm.stderr('CONST: initialization starts', DEBUG_CONST_INIT) - for i in range(numsteps): - self.ilasm.stderr('CONST: step %d of %d' % (i, numsteps), - DEBUG_CONST_INIT) - step_name = 'step%d' % i - self.ilasm.call('void %s::%s()' % (CONST_CLASS, step_name)) - self.ilasm.stderr('CONST: initialization completed', DEBUG_CONST_INIT) - self.ilasm.ret() - self.ilasm.end_function() - - self.end_class() - - def begin_class(self): - self.ilasm.begin_namespace(CONST_NAMESPACE) - self.ilasm.begin_class(CONST_CLASSNAME, beforefieldinit=True) - - def end_class(self): - self.ilasm.end_class() - self.ilasm.end_namespace() - - def _declare_const(self, gen, const): - self.ilasm.field(const.name, const.get_type(), static=True) - - def downcast_constant(self, gen, const, EXPECTED_TYPE): - type = self.cts.lltype_to_cts(EXPECTED_TYPE) - gen.ilasm.opcode('castclass', type) - - def _get_key_for_const(self, value): - if isinstance(value, ootype._view) and isinstance(value._inst, ootype._record): - return value._inst - return BaseConstantGenerator._get_key_for_const(self, value) - - def _create_complex_const(self, value): - if isinstance(value, ootype._view) and isinstance(value._inst, ootype._record): - self.db.cts.lltype_to_cts(value._inst._TYPE) # record the type of the record - return self.record_const(value._inst) - else: - return BaseConstantGenerator._create_complex_const(self, value) - - # _________________________________________________________________ - # OOSupport interface - - def push_constant(self, gen, const): - type_ = const.get_type() - gen.ilasm.load_static_constant(type_, CONST_NAMESPACE, CONST_CLASSNAME, const.name) - - def _push_constant_during_init(self, gen, const): - full_name = '%s::%s' % (CONST_CLASS, const.name) - gen.ilasm.opcode('ldsfld %s %s' % (const.get_type(), full_name)) - - def _store_constant(self, gen, const): - type_ = const.get_type() - gen.ilasm.store_static_constant(type_, CONST_NAMESPACE, CONST_CLASSNAME, const.name) - - # _________________________________________________________________ - # CLI interface - - def _declare_step(self, gen, stepnum): - gen.ilasm.begin_function( - 'step%d' % stepnum, [], 'void', False, 'static') - - def _close_step(self, gen, stepnum): - gen.ilasm.ret() - gen.ilasm.end_function() - - - - -# ______________________________________________________________________ -# Mixins -# -# Mixins are used to add a few CLI-specific methods to each constant -# class. Basically, any time I wanted to extend a base class (such as -# AbstractConst or DictConst), I created a mixin, and then mixed it in -# to each sub-class of that base-class. Kind of awkward. 
- -class CLIBaseConstMixin(object): - """ A mix-in with a few extra methods the CLI backend uses """ - - def get_type(self): - """ Returns the CLI type for this constant's representation """ - return self.cts.lltype_to_cts(self.value._TYPE) - - def push_inline(self, gen, TYPE): - """ Overload the oosupport version so that we use the CLI opcode - for pushing NULL """ - assert self.is_null() - gen.ilasm.opcode('ldnull') - -class CLIDictMixin(CLIBaseConstMixin): - def _check_for_void_dict(self, gen): - KEYTYPE = self.value._TYPE._KEYTYPE - keytype = self.cts.lltype_to_cts(KEYTYPE) - keytype_T = self.cts.lltype_to_cts(self.value._TYPE.KEYTYPE_T) - VALUETYPE = self.value._TYPE._VALUETYPE - valuetype = self.cts.lltype_to_cts(VALUETYPE) - valuetype_T = self.cts.lltype_to_cts(self.value._TYPE.VALUETYPE_T) - if VALUETYPE is ootype.Void: - gen.add_comment(' CLI Dictionary w/ void value') - class_name = PYPY_DICT_OF_VOID % keytype - for key in self.value._dict: - gen.ilasm.opcode('dup') - push_constant(self.db, KEYTYPE, key, gen) - meth = 'void class %s::ll_set(%s)' % (class_name, keytype_T) - gen.ilasm.call_method(meth, False) - return True - return False - - def initialize_data(self, constgen, gen): - # special case: dict of void, ignore the values - if self._check_for_void_dict(gen): - return - return super(CLIDictMixin, self).initialize_data(constgen, gen) - -# ______________________________________________________________________ -# Constant Classes -# -# Here we overload a few methods, and mix in the base classes above. -# Note that the mix-ins go first so that they overload methods where -# required. -# -# Eventually, these probably wouldn't need to exist at all (the JVM -# doesn't have any, for example), or could simply have empty bodies -# and exist to combine a mixin and the generic base class. For now, -# though, they contain the create_pointer() and initialize_data() -# routines. In order to get rid of them, we would need to implement -# the generator interface in the CLI. 
- -class CLIRecordConst(CLIBaseConstMixin, RecordConst): - def create_pointer(self, gen): - self.db.const_count.inc('Record') - super(CLIRecordConst, self).create_pointer(gen) - -class CLIInstanceConst(CLIBaseConstMixin, InstanceConst): - def create_pointer(self, gen): - self.db.const_count.inc('Instance') - self.db.const_count.inc('Instance', self.OOTYPE()) - super(CLIInstanceConst, self).create_pointer(gen) - - -class CLIClassConst(CLIBaseConstMixin, ClassConst): - def is_inline(self): - return True - - def push_inline(self, gen, EXPECTED_TYPE): - if not self.is_null(): - if hasattr(self.value, '_FUNC'): - FUNC = self.value._FUNC - classname = self.db.record_delegate(FUNC) - else: - TYPE = self.value._INSTANCE - classname = self.db.class_or_record_name(TYPE) - gen.ilasm.opcode('ldtoken', classname) - gen.ilasm.call('class [mscorlib]System.Type class [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)') - return - super(CLIClassConst, self).push_inline(gen, EXPECTED_TYPE) - -class CLIListConst(CLIBaseConstMixin, ListConst): - - def _do_not_initialize(self): - # Check if it is a list of all zeroes: - try: - if self.value._list == [0] * len(self.value._list): - return True - except: - pass - return super(CLIListConst, self)._do_not_initialize() - - def create_pointer(self, gen): - self.db.const_count.inc('List') - self.db.const_count.inc('List', self.value._TYPE.ITEM) - self.db.const_count.inc('List', len(self.value._list)) - super(CLIListConst, self).create_pointer(gen) - - -class CLIArrayConst(CLIBaseConstMixin, ArrayConst): - - def _do_not_initialize(self): - # Check if it is an array of all zeroes: - try: - if self.value._list == [0] * len(self.value._list): - return True - except: - pass - return super(CLIArrayConst, self)._do_not_initialize() - - def _setitem(self, SELFTYPE, gen): - gen.array_setitem(SELFTYPE) - - -class CLIDictConst(CLIDictMixin, DictConst): - def create_pointer(self, gen): - self.db.const_count.inc('Dict') - self.db.const_count.inc('Dict', self.value._TYPE._KEYTYPE, self.value._TYPE._VALUETYPE) - super(CLIDictConst, self).create_pointer(gen) - -class CLICustomDictConst(CLIDictMixin, CustomDictConst): - def record_dependencies(self): - if not self.value: - return - eq = self.value._dict.key_eq - hash = self.value._dict.key_hash - self.comparer = EqualityComparer(self.db, self.value._TYPE._KEYTYPE, eq, hash) - self.db.pending_node(self.comparer) - super(CLICustomDictConst, self).record_dependencies() - - def create_pointer(self, gen): - assert not self.is_null() - gen.ilasm.new(self.comparer.get_ctor()) - class_name = self.get_type() - gen.ilasm.new('instance void %s::.ctor(class ' - '[mscorlib]System.Collections.Generic.IEqualityComparer`1)' - % class_name) - self.db.const_count.inc('CustomDict') - self.db.const_count.inc('CustomDict', self.value._TYPE._KEYTYPE, self.value._TYPE._VALUETYPE) - -class CLIStaticMethodConst(CLIBaseConstMixin, StaticMethodConst): - def create_pointer(self, gen): - assert not self.is_null() - signature = self.cts.static_meth_to_signature(self.value) - gen.ilasm.opcode('ldnull') - gen.ilasm.opcode('ldftn', signature) - gen.ilasm.new('instance void class %s::.ctor(object, native int)' % self.delegate_type) - self.db.const_count.inc('StaticMethod') - - def initialize_data(self, constgen, gen): - return - - -class CLIWeakRefConst(CLIBaseConstMixin, WeakRefConst): - def create_pointer(self, gen): - gen.ilasm.new('instance void %s::.ctor()' % self.get_type()) - self.db.const_count.inc('WeakRef') - - def 
get_type(self, include_class=True): - return 'class ' + WEAKREF - - def initialize_data(self, constgen, gen): - if self.value is not None: - push_constant(self.db, self.value._TYPE, self.value, gen) - gen.ilasm.call_method('void %s::ll_set(object)' % self.get_type(), True) - return True - diff --git a/rpython/translator/cli/cts.py b/rpython/translator/cli/cts.py deleted file mode 100644 --- a/rpython/translator/cli/cts.py +++ /dev/null @@ -1,408 +0,0 @@ -""" -Translate between PyPy ootypesystem and .NET Common Type System -""" - -import exceptions - -from py.builtin import set -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.option import getoption -from rpython.translator.cli import oopspec - -from rpython.tool.ansi_print import ansi_log -import py -log = py.log.Producer("cli") -py.log.setconsumer("cli", ansi_log) - -class CliType(object): - def typename(self): - raise NotImplementedError - - def __str__(self): - return self.typename() - - def __hash__(self): - return hash(self.typename()) - - def __eq__(self, other): - return self.typename() == other.typename() - - def __ne__(self, other): - return self.typename() != other.typename() - - -class CliPrimitiveType(CliType): - def __init__(self, name): - self.name = name - - def typename(self): - return self.name - - -class CliReferenceType(CliType): - prefix = 'class ' - - def typename(self): - return self.prefix + self.classname() - - def classname(self): - raise NotImplementedError - -class CliClassType(CliReferenceType): - def __init__(self, assembly, name): - self.assembly = assembly - self.name = name - - def classname(self): - if self.assembly: - return '[%s]%s' % (self.assembly, self.name) - else: - return self.name - -class CliValueType(CliClassType): - prefix = 'valuetype ' - - -class CliGenericType(CliReferenceType): - def __init__(self, assembly, name, numparam): - self.assembly = assembly - self.name = name - self.numparam = numparam - - def classname(self): - paramtypes = [self.paramtype(i) for i in range(self.numparam)] - thistype = self.specialize(*paramtypes) - return thistype.classname() - - def specialize(self, *types): - assert len(types) == self.numparam - return CliSpecializedType(self, types) - - def paramtype(self, num): - assert 0 <= num < self.numparam - return CliPrimitiveType('!%d' % num) - -class CliSpecializedType(CliReferenceType): - def __init__(self, generic_type, arg_types): - self.generic_type = generic_type - self.arg_types = arg_types - - def classname(self): - assembly = self.generic_type.assembly - name = self.generic_type.name - numparam = self.generic_type.numparam - arglist = ', '.join([arg.typename() for arg in self.arg_types]) - return '[%s]%s`%d<%s>' % (assembly, name, numparam, arglist) - -class CliArrayType(CliType): - - def __init__(self, itemtype): - self.itemtype = itemtype - - def typename(self): - return '%s[]' % self.itemtype.typename() - - -T = CliPrimitiveType -class types: - void = T('void') - int16 = T('int16') - int32 = T('int32') - uint32 = T('unsigned int32') - int64 = T('int64') - uint64 = T('unsigned int64') - bool = T('bool') - float64 = T('float64') - char = T('char') - string = T('string') - - weakref = CliClassType('pypylib', 'pypy.runtime.WeakReference') - type = CliClassType('mscorlib', 'System.Type') - object = CliClassType('mscorlib', 'System.Object') - list = CliGenericType('pypylib', 'pypy.runtime.List', 1) - list_of_void = CliClassType('pypylib', 
'pypy.runtime.ListOfVoid') - dict = CliGenericType('pypylib', 'pypy.runtime.Dict', 2) - dict_void_void = CliClassType('pypylib', 'pypy.runtime.DictVoidVoid') - dict_items_iterator = CliGenericType('pypylib', 'pypy.runtime.DictItemsIterator', 2) - string_builder = CliClassType('pypylib', 'pypy.runtime.StringBuilder') -del T - -WEAKREF = types.weakref.classname() -PYPY_DICT_OF_VOID = '[pypylib]pypy.runtime.DictOfVoid`2<%s, int32>' -PYPY_DICT_OF_VOID_KEY = '[pypylib]pypy.runtime.DictOfVoidKey`2' - - -_lltype_to_cts = { - ootype.Void: types.void, - rffi.SHORT: types.int16, - ootype.Signed: types.int32, - ootype.Unsigned: types.uint32, - ootype.SignedLongLong: types.int64, - ootype.UnsignedLongLong: types.uint64, - ootype.Bool: types.bool, - ootype.Float: types.float64, - ootype.Char: types.char, - ootype.UniChar: types.char, - ootype.Class: types.type, - ootype.String: types.string, - ootype.StringBuilder: types.string_builder, - ootype.Unicode: types.string, - ootype.UnicodeBuilder: types.string_builder, - ootype.WeakReference: types.weakref, - ootype.Object: types.object, - - # maps generic types to their ordinal - ootype.List.SELFTYPE_T: types.list, - ootype.List.ITEMTYPE_T: types.list.paramtype(0), - ootype.Dict.SELFTYPE_T: types.dict, - ootype.Dict.KEYTYPE_T: types.dict.paramtype(0), - ootype.Dict.VALUETYPE_T: types.dict.paramtype(1), - ootype.DictItemsIterator.SELFTYPE_T: types.dict_items_iterator, - ootype.DictItemsIterator.KEYTYPE_T: types.dict_items_iterator.paramtype(0), - ootype.DictItemsIterator.VALUETYPE_T: types.dict_items_iterator.paramtype(1), - } - - -def _get_from_dict(d, key, error): - try: - return d[key] - except KeyError: - if getoption('nostop'): - log.WARNING(error) - return key - else: - assert False, error - -class CTS(object): - - ILASM_KEYWORDS = set(["at", "as", "implicitcom", "implicitres", - "noappdomain", "noprocess", "nomachine", "extern", "instance", - "explicit", "default", "vararg", "unmanaged", "cdecl", "stdcall", - "thiscall", "fastcall", "marshal", "in", "out", "opt", "retval", - "static", "public", "private", "family", "initonly", - "rtspecialname", "specialname", "assembly", "famandassem", - "famorassem", "privatescope", "literal", "notserialized", "value", - "not_in_gc_heap", "interface", "sealed", "abstract", "auto", - "sequential", "ansi", "unicode", "autochar", "bestfit", - "charmaperror", "import", "serializable", "nested", "lateinit", - "extends", "implements", "final", "virtual", "hidebysig", - "newslot", "unmanagedexp", "pinvokeimpl", "nomangle", "ole", - "lasterr", "winapi", "native", "il", "cil", "optil", "managed", - "forwardref", "runtime", "internalcall", "synchronized", - "noinlining", "custom", "fixed", "sysstring", "array", "variant", - "currency", "syschar", "void", "bool", "int8", "int16", "int32", - "int64", "float32", "float64", "error", "unsigned", "uint", - "uint8", "uint16", "uint32", "uint64", "decimal", "date", "bstr", - "lpstr", "lpwstr", "lptstr", "objectref", "iunknown", "idispatch", - "struct", "safearray", "int", "byvalstr", "tbstr", "lpvoid", - "any", "float", "lpstruct", "null", "ptr", "vector", "hresult", - "carray", "userdefined", "record", "filetime", "blob", "stream", - "storage", "streamed_object", "stored_object", "blob_object", - "cf", "clsid", "method", "class", "pinned", "modreq", "modopt", - "typedref", "type","refany", "wchar", "char", "fromunmanaged", - "callmostderived", "bytearray", "with", "init", "to", "catch", - "filter", "finally", "fault", "handler", "tls", "field", - "request", "demand", "assert", 
"deny", "permitonly", "linkcheck", - "inheritcheck", "reqmin", "reqopt", "reqrefuse", "prejitgrant", - "prejitdeny", "noncasdemand", "noncaslinkdemand", - "noncasinheritance", "readonly", "nometadata", "algorithm", - "fullorigin", "nan", "inf", "publickey", "enablejittracking", - "disablejitoptimizer", "preservesig", "beforefieldinit", - "alignment", "nullref", "valuetype", "compilercontrolled", - "reqsecobj", "enum", "object", "string", "true", "false", "is", - "on", "off", "add", "and", "arglist", "beq", "bge", "bgt", "ble", - "blt", "bne", "box", "br", "break", "brfalse", "brnull", "brtrue", - "call", "calli", "callvirt", "castclass", "ceq", "cgt", - "ckfinite", "clt", "conf", "constrained", "conv", "cpblk", - "cpobj", "div", "dup", "endfault", "endfilter", "endfinally", - "initblk", "initobj", "isinst", "jmp", "ldarg", "ldarga", "ldc", - "ldelem", "ldelema", "ldfld", "ldflda", "ldftn", "ldind", "ldlen", - "ldloc", "ldloca", "ldnull", "ldobj", "ldsfld", "ldsflda", - "ldstr", "ldtoken", "ldvirtftn", "leave", "localloc", "mkrefany", - "mul", "neg", "newarr", "newobj", "nop", "not", "or", "pop", - "readonly", "refanytype", "refanyval", "rem", "ret", "rethrow", - "shl", "shr", "sizeof", "starg", "stelem", "stfld", "stind", - "stloc", "stobj", "stsfld", "sub", "switch", "tail", "throw", - "unaligned", "unbox", "volatile", "xor", "ole"]) - # ole is not a keyword, but mono ilasm fails if you use it as a field/method name - - types = types # for convenience - - def __init__(self, db): - self.db = db - - def escape_name(self, name): - """Mangle then name if it's a ilasm reserved word""" - if name in self.ILASM_KEYWORDS: - return "'%s'" % name - else: - return name - - def lltype_to_cts(self, t): - if t is ootype.ROOT: - return types.object - elif isinstance(t, lltype.Ptr) and isinstance(t.TO, lltype.OpaqueType): - return types.object - elif isinstance(t, ootype.Instance): - if getattr(t, '_is_value_type', False): - cls = CliValueType - else: - cls = CliClassType - NATIVE_INSTANCE = t._hints.get('NATIVE_INSTANCE', None) - if NATIVE_INSTANCE: - return cls(None, NATIVE_INSTANCE._name) - else: - name = self.db.pending_class(t) - return cls(None, name) - elif isinstance(t, ootype.Record): - name = self.db.pending_record(t) - return CliClassType(None, name) - elif isinstance(t, ootype.StaticMethod): - delegate = self.db.record_delegate(t) - return CliClassType(None, delegate) - elif isinstance(t, ootype.Array): - item_type = self.lltype_to_cts(t.ITEM) - if item_type == types.void: # special case: Array of Void - return types.list_of_void - return CliArrayType(item_type) - elif isinstance(t, ootype.List): - item_type = self.lltype_to_cts(t.ITEM) - if item_type == types.void: # special case: List of Void - return types.list_of_void - return types.list.specialize(item_type) - elif isinstance(t, ootype.Dict): - key_type = self.lltype_to_cts(t._KEYTYPE) - value_type = self.lltype_to_cts(t._VALUETYPE) - if value_type == types.void: # special cases: Dict with voids - if key_type == types.void: - return types.dict_void_void - else: - # XXX - return CliClassType(None, PYPY_DICT_OF_VOID % key_type) - elif key_type == types.void: - assert value_type != types.void - return CliClassType(None, PYPY_DICT_OF_VOID_KEY % value_type) - return types.dict.specialize(key_type, value_type) - elif isinstance(t, ootype.DictItemsIterator): - key_type = self.lltype_to_cts(t._KEYTYPE) - value_type = self.lltype_to_cts(t._VALUETYPE) - if key_type == types.void: - key_type = types.int32 # placeholder - if value_type == types.void: 
- value_type = types.int32 # placeholder - return types.dict_items_iterator.specialize(key_type, value_type) - - return _get_from_dict(_lltype_to_cts, t, 'Unknown type %s' % t) - - def llvar_to_cts(self, var): - return self.lltype_to_cts(var.concretetype), var.name - - def llconst_to_cts(self, const): - return self.lltype_to_cts(const.concretetype), const.value - - def ctor_name(self, t): - return 'instance void %s::.ctor()' % self.lltype_to_cts(t) - - def static_meth_to_signature(self, sm): - from rpython.translator.oosupport import metavm - graph = getattr(sm, 'graph', None) - if graph: - return self.graph_to_signature(graph) - module, name = metavm.get_primitive_name(sm) - func_name = '[pypylib]pypy.builtin.%s::%s' % (module, name) - T = ootype.typeOf(sm) - return self.format_signatue(func_name, T.ARGS, T.RESULT) - - def graph_to_signature(self, graph, is_method = False, func_name = None): - func_name = func_name or graph.name - func_name = self.escape_name(func_name) - namespace = getattr(graph.func, '_namespace_', None) - if namespace: - func_name = '%s::%s' % (namespace, func_name) - - ARGS = [arg.concretetype for arg in graph.getargs() if arg.concretetype is not ootype.Void] - if is_method: - ARGS = ARGS[1:] - RESULT = graph.getreturnvar().concretetype - return self.format_signatue(func_name, ARGS, RESULT) - - def format_signatue(self, func_name, ARGS, RESULT): - arg_types = [self.lltype_to_cts(ARG).typename() for ARG in ARGS] - arg_list = ', '.join(arg_types) - ret_type = self.lltype_to_cts(RESULT) - - return '%s %s(%s)' % (ret_type, func_name, arg_list) - - def op_to_signature(self, op, func_name): - ret_type, ret_var = self.llvar_to_cts(op.result) - func_name = self.escape_name(func_name) - - args = [arg for arg in op.args[1:] - if arg.concretetype is not ootype.Void] - - arg_types = [self.lltype_to_cts(arg.concretetype).typename() for arg in args] - arg_list = ', '.join(arg_types) - - return '%s %s(%s)' % (ret_type, func_name, arg_list) - - - def method_signature(self, TYPE, name_or_desc): - # TODO: use callvirt only when strictly necessary - if isinstance(TYPE, ootype.Instance): - if isinstance(name_or_desc, ootype._overloaded_meth_desc): - name = name_or_desc.name - METH = name_or_desc.TYPE - virtual = True - else: - name = name_or_desc - owner, meth = TYPE._lookup(name) - METH = meth._TYPE - virtual = getattr(meth, '_virtual', True) - class_name = self.db.class_name(TYPE) - full_name = 'class %s::%s' % (class_name, self.escape_name(name)) - returntype = self.lltype_to_cts(METH.RESULT) - arg_types = [self.lltype_to_cts(ARG).typename() for ARG in METH.ARGS if ARG is not ootype.Void] - arg_list = ', '.join(arg_types) - return '%s %s(%s)' % (returntype, full_name, arg_list), virtual - - elif isinstance(TYPE, (ootype.BuiltinType, ootype.StaticMethod)): - assert isinstance(name_or_desc, str) - name = name_or_desc - if isinstance(TYPE, ootype.StaticMethod): - METH = TYPE - else: - METH = oopspec.get_method(TYPE, name) - class_name = self.lltype_to_cts(TYPE) - if isinstance(TYPE, ootype.Dict): - KEY = TYPE._KEYTYPE - VALUE = TYPE._VALUETYPE - name = name_or_desc - if KEY is ootype.Void and VALUE is ootype.Void and name == 'll_get_items_iterator': - # ugly, ugly special case - ret_type = types.dict_items_iterator.specialize(types.int32, types.int32) - elif VALUE is ootype.Void and METH.RESULT is ootype.Dict.VALUETYPE_T: - ret_type = types.void - else: - ret_type = self.lltype_to_cts(METH.RESULT) - ret_type = dict_of_void_ll_copy_hack(TYPE, ret_type) - else: - ret_type = 
self.lltype_to_cts(METH.RESULT) - generic_types = getattr(TYPE, '_generic_types', {}) - arg_types = [self.lltype_to_cts(arg).typename() for arg in METH.ARGS if - arg is not ootype.Void and \ - generic_types.get(arg, arg) is not ootype.Void] - arg_list = ', '.join(arg_types) - return '%s %s::%s(%s)' % (ret_type, class_name, name, arg_list), False - - else: - assert False - -def dict_of_void_ll_copy_hack(TYPE, ret_type): - # XXX: ugly hack to make the ll_copy signature correct when - # CustomDict is special-cased to DictOfVoid. - if isinstance(TYPE, ootype.CustomDict) and TYPE._VALUETYPE is ootype.Void: - return ret_type.typename().replace('Dict`2', 'DictOfVoid`2') - else: - return ret_type diff --git a/rpython/translator/cli/database.py b/rpython/translator/cli/database.py deleted file mode 100644 --- a/rpython/translator/cli/database.py +++ /dev/null @@ -1,160 +0,0 @@ -import operator -import string -from rpython.translator.cli.function import Function, log -from rpython.translator.cli.class_ import Class -from rpython.translator.cli.record import Record -from rpython.translator.cli.delegate import Delegate -from rpython.translator.cli.comparer import EqualityComparer -from rpython.translator.cli.node import Node -from rpython.translator.cli.support import string_literal, Counter -from rpython.translator.cli.cts import types -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.ootypesystem.module import ll_os, ll_math -from rpython.rtyper.ootypesystem.rtupletype import TUPLE_TYPE -from rpython.translator.cli import dotnet -from rpython.rlib.objectmodel import CDefinedIntSymbolic -from rpython.translator.oosupport.database import Database as OODatabase - -try: - set -except NameError: - from sets import Set as set - -BUILTIN_RECORDS = { - TUPLE_TYPE([ootype.Signed, ootype.Signed]): - '[pypylib]pypy.runtime.Record_Signed_Signed', - TUPLE_TYPE([ootype.String, ootype.String]): - '[pypylib]pypy.runtime.Record_String_String', - - ll_math.FREXP_RESULT: '[pypylib]pypy.runtime.Record_Float_Signed', - ll_math.MODF_RESULT: '[pypylib]pypy.runtime.Record_Float_Float', - ll_os.STAT_RESULT: '[pypylib]pypy.runtime.Record_Stat_Result', - } - -class LowLevelDatabase(OODatabase): - def __init__(self, genoo): - OODatabase.__init__(self, genoo) - self.classes = {} # INSTANCE --> class_name - self.classnames = set() # (namespace, name) - self.recordnames = {} # RECORD --> name - self.functions = {} # graph --> function_name - self.methods = {} # graph --> method_name - self.consts = {} # value --> AbstractConst - self.delegates = {} # StaticMethod --> type_name - self.const_count = Counter() # store statistics about constants - - def next_count(self): - return self.unique() - - def _default_record_name(self, RECORD): - trans = string.maketrans('[]<>(), :', '_________') - name = ['Record'] - # XXX: refactor this: we need a proper way to ensure unique names - for f_name, (FIELD_TYPE, f_default) in RECORD._fields.iteritems(): - type_name = FIELD_TYPE._short_name().translate(trans) - name.append(f_name) - name.append(type_name) - - return '__'.join(name) - - def _default_class_name(self, INSTANCE): - parts = INSTANCE._name.rsplit('.', 1) - if len(parts) == 2: - return parts - else: - return None, parts[0] - - def pending_function(self, graph, functype=None): - if functype is None: - function = self.genoo.Function(self, graph) - else: - function = functype(self, graph) - self.pending_node(function) - return function.get_name() - - def pending_class(self, INSTANCE): - try: - return 
self.classes[INSTANCE] - except KeyError: - pass - - if isinstance(INSTANCE, dotnet.NativeInstance): - self.classes[INSTANCE] = INSTANCE._name - return INSTANCE._name - else: - namespace, name = self._default_class_name(INSTANCE) - name = self.get_unique_class_name(namespace, name) - if namespace is None: - full_name = name - else: - full_name = '%s.%s' % (namespace, name) - self.classes[INSTANCE] = full_name - cls = Class(self, INSTANCE, namespace, name) - self.pending_node(cls) - return full_name - - def pending_record(self, RECORD): - try: - return BUILTIN_RECORDS[RECORD] - except KeyError: - pass - try: - return self.recordnames[RECORD] - except KeyError: - pass - name = self._default_record_name(RECORD) - name = self.get_unique_class_name(None, name) - self.recordnames[RECORD] = name - r = Record(self, RECORD, name) - self.pending_node(r) - return name - - def record_function(self, graph, name): - self.functions[graph] = name - - def graph_name(self, graph): - # XXX: graph name are not guaranteed to be unique - return self.functions.get(graph, None) - - def get_unique_class_name(self, namespace, name): - base_name = name - i = 0 - while (namespace, name) in self.classnames: - name = '%s_%d' % (base_name, i) - i+= 1 - self.classnames.add((namespace, name)) - return name - - def class_or_record_name(self, TYPE): - if TYPE is not ootype.ROOT and isinstance(TYPE, ootype.Instance): - return self.class_name(TYPE) - elif isinstance(TYPE, ootype.Record): - return self.get_record_name(TYPE) - elif isinstance(TYPE, ootype.OOType): - return self.cts.lltype_to_cts(TYPE) - else: - assert False - - def class_name(self, INSTANCE): - if INSTANCE is ootype.ROOT: - return types.object.classname() - try: - NATIVE_INSTANCE = INSTANCE._hints['NATIVE_INSTANCE'] - return NATIVE_INSTANCE._name - except KeyError: - return self.classes[INSTANCE] - - def get_record_name(self, RECORD): - try: - return BUILTIN_RECORDS[RECORD] - except KeyError: - return self.recordnames[RECORD] - - def record_delegate(self, TYPE): - try: - return self.delegates[TYPE] - except KeyError: - name = 'StaticMethod__%d' % len(self.delegates) - self.delegates[TYPE] = name - self.pending_node(Delegate(self, TYPE, name)) - return name diff --git a/rpython/translator/cli/delegate.py b/rpython/translator/cli/delegate.py deleted file mode 100644 --- a/rpython/translator/cli/delegate.py +++ /dev/null @@ -1,47 +0,0 @@ -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.cts import CTS -from rpython.translator.cli.node import Node - -class Delegate(Node): - def __init__(self, db, TYPE, name): - self.cts = CTS(db) - self.TYPE = TYPE - self.name = name - - def __eq__(self, other): - return self.TYPE == other.TYPE - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash(self.TYPE) - - def get_name(self): - return self.name - - def dependencies(self): - # record we know about result and argument types - self.cts.lltype_to_cts(self.TYPE.RESULT) - for ARG in self.TYPE.ARGS: - self.cts.lltype_to_cts(ARG) - - - def render(self, ilasm): - TYPE = self.TYPE - ilasm.begin_class(self.name, '[mscorlib]System.MulticastDelegate', sealed=True) - ilasm.begin_function('.ctor', - [('object', "'object'"), ('native int', "'method'")], - 'void', - False, - 'hidebysig', 'specialname', 'rtspecialname', 'instance', 'default', - runtime=True) - ilasm.end_function() - - resulttype = self.cts.lltype_to_cts(TYPE.RESULT) - arglist = [(self.cts.lltype_to_cts(ARG), '') for ARG in TYPE.ARGS if ARG is not ootype.Void] - 
ilasm.begin_function('Invoke', arglist, resulttype, False, - 'virtual', 'hidebysig', 'instance', 'default', - runtime=True) - ilasm.end_function() - ilasm.end_class() diff --git a/rpython/translator/cli/dotnet.py b/rpython/translator/cli/dotnet.py deleted file mode 100644 --- a/rpython/translator/cli/dotnet.py +++ /dev/null @@ -1,747 +0,0 @@ -import types - -from rpython.tool.pairtype import pair, pairtype -from rpython.annotator.model import SomeObject, SomeInstance, SomeOOInstance, SomeInteger, s_None,\ - s_ImpossibleValue, lltype_to_annotation, annotation_to_lltype, SomeChar, SomeString, SomeOOStaticMeth -from rpython.annotator.unaryop import immutablevalue -from rpython.annotator.binaryop import _make_none_union -from rpython.annotator import model as annmodel -from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from rpython.rtyper.error import TyperError -from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.rmodel import Repr -from rpython.rtyper.rint import IntegerRepr -from rpython.rtyper.ootypesystem.rootype import OOInstanceRepr -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.ootypesystem.ootype import meth, overload, Meth, StaticMethod -from rpython.translator.cli.support import PythonNet - -## Annotation model - -class SomeCliClass(SomeObject): - def getattr(self, s_attr): - assert self.is_constant() - assert s_attr.is_constant() - cliclass = self.const - attrname = s_attr.const - if attrname in cliclass._static_fields: - TYPE = cliclass._static_fields[attrname] - return OverloadingResolver.lltype_to_annotation(TYPE) - elif attrname in cliclass._static_methods: - return SomeCliStaticMethod(cliclass, attrname) - else: - return s_ImpossibleValue - - def setattr(self, s_attr, s_value): - assert self.is_constant() - assert s_attr.is_constant - cliclass = self.const - attrname = s_attr.const - if attrname not in cliclass._static_fields: - return s_ImpossibleValue - # XXX: check types? 
- - def simple_call(self, *s_args): - assert self.is_constant() - return SomeOOInstance(self.const._INSTANCE) - - def rtyper_makerepr(self, rtyper): - return CliClassRepr(self.const) - - def rtyper_makekey(self): - return self.__class__, self.const - - -class SomeCliStaticMethod(SomeObject): - def __init__(self, cli_class, meth_name): - self.cli_class = cli_class - self.meth_name = meth_name - - def simple_call(self, *args_s): - return self.cli_class._ann_static_method(self.meth_name, args_s) - - def rtyper_makerepr(self, rtyper): - return CliStaticMethodRepr(self.cli_class, self.meth_name) - - def rtyper_makekey(self): - return self.__class__, self.cli_class, self.meth_name - -class __extend__(SomeOOInstance): - - def simple_call(self, *s_args): - from rpython.translator.cli.query import get_cli_class - DELEGATE = get_cli_class('System.Delegate')._INSTANCE - if ootype.isSubclass(self.ootype, DELEGATE): - s_invoke = self.getattr(immutablevalue('Invoke')) - return s_invoke.simple_call(*s_args) - else: - # cannot call a non-delegate - return SomeObject.simple_call(self, *s_args) - -class __extend__(pairtype(SomeOOInstance, SomeInteger)): - def getitem((ooinst, index)): - if ooinst.ootype._isArray: - return SomeOOInstance(ooinst.ootype._ELEMENT) - return s_ImpossibleValue - - def setitem((ooinst, index), s_value): - if ooinst.ootype._isArray: - if s_value is annmodel.s_None: - return s_None - ELEMENT = ooinst.ootype._ELEMENT - VALUE = s_value.ootype - assert ootype.isSubclass(VALUE, ELEMENT) - return s_None - return s_ImpossibleValue - - -## Rtyper model - -class CliClassRepr(Repr): - lowleveltype = ootype.Void - - def __init__(self, cli_class): - self.cli_class = cli_class - - def rtype_getattr(self, hop): - attrname = hop.args_v[1].value - if attrname in self.cli_class._static_methods: - return hop.inputconst(ootype.Void, self.cli_class) - else: - assert attrname in self.cli_class._static_fields - TYPE = self.cli_class._static_fields[attrname] - c_class = hop.inputarg(hop.args_r[0], arg=0) - c_name = hop.inputconst(ootype.Void, attrname) - return hop.genop("cli_getstaticfield", [c_class, c_name], resulttype=hop.r_result.lowleveltype) - - def rtype_setattr(self, hop): - attrname = hop.args_v[1].value - assert attrname in self.cli_class._static_fields - c_class = hop.inputarg(hop.args_r[0], arg=0) - c_name = hop.inputconst(ootype.Void, attrname) - v_value = hop.inputarg(hop.args_r[2], arg=2) - return hop.genop("cli_setstaticfield", [c_class, c_name, v_value], resulttype=hop.r_result.lowleveltype) - - def rtype_simple_call(self, hop): - # TODO: resolve constructor overloading - INSTANCE = hop.args_r[0].cli_class._INSTANCE - cINST = hop.inputconst(ootype.Void, INSTANCE) - vlist = hop.inputargs(*hop.args_r)[1:] # discard the first argument - hop.exception_is_here() - return hop.genop("new", [cINST]+vlist, resulttype=hop.r_result.lowleveltype) - -class CliStaticMethodRepr(Repr): - lowleveltype = ootype.Void - - def __init__(self, cli_class, meth_name): - self.cli_class = cli_class - self.meth_name = meth_name - - def _build_desc(self, args_v): - ARGS = tuple([v.concretetype for v in args_v]) - return self.cli_class._lookup(self.meth_name, ARGS) - - def rtype_simple_call(self, hop): - vlist = [] - for i, repr in enumerate(hop.args_r[1:]): - vlist.append(hop.inputarg(repr, i+1)) - resulttype = hop.r_result.lowleveltype - desc = self._build_desc(vlist) - cDesc = hop.inputconst(ootype.Void, desc) - return hop.genop("direct_call", [cDesc] + vlist, resulttype=resulttype) - - -class 
__extend__(pairtype(OOInstanceRepr, IntegerRepr)): - - def rtype_getitem((r_inst, r_int), hop): - if not r_inst.lowleveltype._isArray: - raise TyperError("getitem() on a non-array instance") - v_array, v_index = hop.inputargs(r_inst, ootype.Signed) - hop.exception_is_here() - return hop.genop('cli_getelem', [v_array, v_index], hop.r_result.lowleveltype) - - def rtype_setitem((r_inst, r_int), hop): - if not r_inst.lowleveltype._isArray: - raise TyperError("setitem() on a non-array instance") - vlist = hop.inputargs(*hop.args_r) - hop.exception_is_here() - return hop.genop('cli_setelem', vlist, hop.r_result.lowleveltype) - - -class __extend__(OOInstanceRepr): - - def rtype_len(self, hop): - if not self.lowleveltype._isArray: - raise TypeError("len() on a non-array instance") - vlist = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - return hop.genop('cli_arraylength', vlist, hop.r_result.lowleveltype) - - def rtype_simple_call(self, hop): - TYPE = self.lowleveltype - _, meth = TYPE._lookup('Invoke') - assert isinstance(meth, ootype._overloaded_meth) - ARGS = tuple([repr.lowleveltype for repr in hop.args_r[1:]]) - desc = meth._get_desc('Invoke', ARGS) - cname = hop.inputconst(ootype.Void, desc) - vlist = hop.inputargs(self, *hop.args_r[1:]) - hop.exception_is_here() - return hop.genop("oosend", [cname]+vlist, - resulttype = hop.r_result.lowleveltype) - - -## OOType model - -class OverloadingResolver(ootype.OverloadingResolver): - - def _can_convert_from_to(self, ARG1, ARG2): - if ARG1 is ootype.Void and isinstance(ARG2, NativeInstance): - return True # ARG1 could be None, that is always convertible to a NativeInstance - else: - return ootype.OverloadingResolver._can_convert_from_to(self, ARG1, ARG2) - - def annotation_to_lltype(cls, ann): - if isinstance(ann, SomeChar): - return ootype.Char - elif isinstance(ann, SomeString): - return ootype.String - else: - return annotation_to_lltype(ann) - annotation_to_lltype = classmethod(annotation_to_lltype) - - def lltype_to_annotation(cls, TYPE): - if isinstance(TYPE, NativeInstance): - return SomeOOInstance(TYPE) - elif TYPE is ootype.Char: - return SomeChar() - elif TYPE is ootype.String: - return SomeString(can_be_None=True) - else: - return lltype_to_annotation(TYPE) - lltype_to_annotation = classmethod(lltype_to_annotation) - - -class _static_meth(object): - - def __init__(self, TYPE): - self._TYPE = TYPE - - def _set_attrs(self, cls, name): - self._cls = cls - self._name = name - - def _get_desc(self, ARGS): - #assert ARGS == self._TYPE.ARGS - return self - - -class _overloaded_static_meth(object): - def __init__(self, *overloadings, **attrs): - resolver = attrs.pop('resolver', OverloadingResolver) - assert not attrs - self._resolver = resolver(overloadings) - - def _set_attrs(self, cls, name): - for meth in self._resolver.overloadings: - meth._set_attrs(cls, name) - - def _get_desc(self, ARGS): - meth = self._resolver.resolve(ARGS) - assert isinstance(meth, _static_meth) - return meth._get_desc(ARGS) - - -class NativeInstance(ootype.Instance): - def __init__(self, assembly, namespace, name, superclass, - fields={}, methods={}, _is_root=False, _hints = {}): - fullname = '%s%s.%s' % (assembly, namespace, name) - self._namespace = namespace - self._classname = name - self._is_value_type = False - ootype.Instance.__init__(self, fullname, superclass, fields, methods, _is_root, _hints) - - -## RPython interface definition - -class CliClass(object): - def __init__(self, INSTANCE, static_methods, static_fields): - self._name = 
INSTANCE._name - self._INSTANCE = INSTANCE - self._static_methods = {} - self._static_fields = {} - self._add_methods(static_methods) - - def __repr__(self): - return '<%s>' % (self,) - - def __str__(self): - return '%s(%s)' % (self.__class__.__name__, self._INSTANCE._name) - - def _add_methods(self, methods): - self._static_methods.update(methods) - for name, meth in methods.iteritems(): - meth._set_attrs(self, name) - - def _add_static_fields(self, fields): - self._static_fields.update(fields) - - def _lookup(self, meth_name, ARGS): - meth = self._static_methods[meth_name] - return meth._get_desc(ARGS) - - def _ann_static_method(self, meth_name, args_s): - meth = self._static_methods[meth_name] - return meth._resolver.annotate(args_s) - - def _load_class(self): - names = self._INSTANCE._namespace.split('.') - names.append(self._INSTANCE._classname) - obj = PythonNet - for name in names: - obj = getattr(obj, name) - self._PythonNet_class = obj - - def __getattr__(self, attr): - if attr in self._static_methods or attr in self._static_fields: - self._load_class() - return getattr(self._PythonNet_class, attr) - else: - raise AttributeError, attr - - def __call__(self, *args): - self._load_class() - return self._PythonNet_class(*args) - - -class Entry(ExtRegistryEntry): - _type_ = CliClass - - def compute_annotation(self): - return SomeCliClass() - - def compute_result_annotation(self): - return SomeOOInstance(self.instance._INSTANCE) - - -BOXABLE_TYPES = [ootype.Signed, ootype.Unsigned, ootype.SignedLongLong, - ootype.UnsignedLongLong, ootype.Bool, ootype.Float, - ootype.Char, ootype.String] - -class BoxedSpace: - objects = {} - index = 0 - def put(cls, obj): - index = cls.index - cls.objects[index] = obj - cls.index += 1 - return index - put = classmethod(put) - - def get(cls, index): - return cls.objects[index] - get = classmethod(get) - -def box(x): - t = type(x) - if t is int: - return CLR.System.Int32(x) - elif t is r_uint: - return CLR.System.UInt32(x) - elif t is r_longlong: - return CLR.System.Int64(x) - elif t is r_ulonglong: - return CLR.System.UInt64(x) - elif t is bool: - return CLR.System.Boolean(x) - elif t is float: - return CLR.System.Double(x) - elif t is str or t is unicode: - if len(x) == 1: - return CLR.System.Char(x) - else: - return CLR.System.String(x) - elif isinstance(x, ootype._class): - if hasattr(x, '_FUNC'): - TYPE = x._FUNC - assert isinstance(TYPE, ootype.StaticMethod) - return typeof(TYPE) - elif x is ootype.nullruntimeclass: - return None - else: - name = x._INSTANCE._assembly_qualified_name - t = CLR.System.Type.GetType(name) - assert t is not None - return t - elif isinstance(x, PythonNet.System.Object): - return x - elif x is None: - return None - else: - # cast RPython instances to System.Object is trivial when - # translated but not when interpreting, because Python for - # .NET doesn't support passing aribrary Python objects to - # .NET. To solve, we store them in the BoxedSpace, then we - # return an opaque objects, which will be used by unbox to - # retrieve the original RPython instance. 
- index = BoxedSpace.put(x) - res = PythonNet.pypy.test.ObjectWrapper(index) - return res - -def unbox(x, TYPE): - if isinstance(x, PythonNet.pypy.test.ObjectWrapper): - x = BoxedSpace.get(x.index) - - if isinstance(TYPE, (type, types.ClassType)): - # we need to check the TYPE and return None if it fails - if isinstance(x, TYPE): - return x - else: - return None - - if isinstance(TYPE, ootype.OOType) and TYPE is not ootype.String and not isinstance(TYPE, ootype.StaticMethod): - try: - return ootype.enforce(TYPE, x) - except TypeError: - return None - - # TODO: do the typechecking also in the other cases - - # this is a workaround against a pythonnet limitation: you can't - # directly get the, e.g., python int from the System.Int32 object: - # a simple way to do this is to put it into an ArrayList and - # retrieve the value. - tmp = PythonNet.System.Collections.ArrayList() - tmp.Add(x) - return tmp[0] - - -class Entry(ExtRegistryEntry): - _about_ = box - - def compute_result_annotation(self, x_s): - can_be_None = getattr(x_s, 'can_be_None', False) - return SomeOOInstance(CLR.System.Object._INSTANCE, can_be_None=can_be_None) - - def specialize_call(self, hop): - v_obj, = hop.inputargs(*hop.args_r) - - hop.exception_cannot_occur() - TYPE = v_obj.concretetype - if (TYPE is ootype.String or isinstance(TYPE, (ootype.OOType, NativeInstance))): - return hop.genop('ooupcast', [v_obj], hop.r_result.lowleveltype) - else: - if TYPE not in BOXABLE_TYPES: - raise TyperError, "Can't box values of type %s" % v_obj.concretetype - return hop.genop('clibox', [v_obj], hop.r_result.lowleveltype) - - -class Entry(ExtRegistryEntry): - _about_ = unbox - - def compute_result_annotation(self, x_s, type_s): - assert isinstance(x_s, SomeOOInstance) - assert isinstance(x_s.ootype, NativeInstance) - assert type_s.is_constant() - TYPE = type_s.const - if isinstance(TYPE, (type, types.ClassType)): - # it's a user-defined class, so we return SomeInstance - # can_be_None == True because it can always return None, if it fails - classdef = self.bookkeeper.getuniqueclassdef(TYPE) - return SomeInstance(classdef, can_be_None=True) - elif TYPE in BOXABLE_TYPES: - return OverloadingResolver.lltype_to_annotation(TYPE) - elif isinstance(TYPE, ootype.StaticMethod): - return SomeOOStaticMeth(TYPE) - elif isinstance(TYPE, ootype.OOType): - return SomeOOInstance(TYPE) - else: - assert False - - - def specialize_call(self, hop): - hop.exception_cannot_occur() - assert hop.args_s[1].is_constant() - TYPE = hop.args_s[1].const - v_obj = hop.inputarg(hop.args_r[0], arg=0) - if TYPE is ootype.String or isinstance(TYPE, (type, types.ClassType)) or isinstance(TYPE, ootype.OOType): - return hop.genop('oodowncast', [v_obj], hop.r_result.lowleveltype) - else: - c_type = hop.inputconst(ootype.Void, TYPE) - return hop.genop('cliunbox', [v_obj, c_type], hop.r_result.lowleveltype) - - - -native_exc_cache = {} -def NativeException(cliClass): - try: - return native_exc_cache[cliClass._name] - except KeyError: - res = _create_NativeException(cliClass) - native_exc_cache[cliClass._name] = res - return res - -def _create_NativeException(cliClass): - from rpython.translator.cli.support import getattr_ex - TYPE = cliClass._INSTANCE - if PythonNet.__name__ in ('CLR', 'clr'): - # we are using pythonnet -- use the .NET class - name = '%s.%s' % (TYPE._namespace, TYPE._classname) - res = getattr_ex(PythonNet, name) - else: - # we are not using pythonnet -- create a fake class - res = types.ClassType(TYPE._classname, (Exception,), {}) - res._rpython_hints = 
{'NATIVE_INSTANCE': TYPE} - return res - -def native_exc(exc): - return exc - -class Entry(ExtRegistryEntry): - _about_ = native_exc - - def compute_result_annotation(self, exc_s): - assert isinstance(exc_s, SomeInstance) - cls = exc_s.classdef.classdesc.pyobj - assert issubclass(cls, Exception) - NATIVE_INSTANCE = cls._rpython_hints['NATIVE_INSTANCE'] - return SomeOOInstance(NATIVE_INSTANCE) - - def specialize_call(self, hop): - v_obj, = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - return hop.genop('same_as', [v_obj], hop.r_result.lowleveltype) - -def new_array(type, length): - # PythonNet doesn't provide a straightforward way to create arrays, - # let's use reflection instead - - # hack to produce the array type name from the member type name - typename = type._INSTANCE._assembly_qualified_name - parts = typename.split(',') - parts[0] = parts[0] + '[]' - typename = ','.join(parts) - t = PythonNet.System.Type.GetType(typename) - ctor = t.GetConstructors()[0] - return ctor.Invoke([length]) - -def init_array(type, *args): - array = new_array(type, len(args)) - for i, arg in enumerate(args): - array[i] = arg - return array - -class Entry(ExtRegistryEntry): - _about_ = new_array - - def compute_result_annotation(self, type_s, length_s): - from rpython.translator.cli.query import get_cli_class - assert type_s.is_constant() - assert isinstance(length_s, SomeInteger) - TYPE = type_s.const._INSTANCE - fullname = '%s.%s[]' % (TYPE._namespace, TYPE._classname) - cliArray = get_cli_class(fullname) - return SomeOOInstance(cliArray._INSTANCE) - - def specialize_call(self, hop): - c_type, v_length = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - return hop.genop('cli_newarray', [c_type, v_length], hop.r_result.lowleveltype) - - -class Entry(ExtRegistryEntry): - _about_ = init_array - - def compute_result_annotation(self, type_s, *args_s): - from rpython.translator.cli.query import get_cli_class - assert type_s.is_constant() - TYPE = type_s.const._INSTANCE - for i, arg_s in enumerate(args_s): - if TYPE is not arg_s.ootype: - raise TypeError, 'Wrong type of arg #%d: %s expected, %s found' % \ - (i, TYPE, arg_s.ootype) - fullname = '%s.%s[]' % (TYPE._namespace, TYPE._classname) - cliArray = get_cli_class(fullname) - return SomeOOInstance(cliArray._INSTANCE) - - def specialize_call(self, hop): - vlist = hop.inputargs(*hop.args_r) - c_type, v_elems = vlist[0], vlist[1:] - c_length = hop.inputconst(ootype.Signed, len(v_elems)) - hop.exception_cannot_occur() - v_array = hop.genop('cli_newarray', [c_type, c_length], hop.r_result.lowleveltype) - for i, v_elem in enumerate(v_elems): - c_index = hop.inputconst(ootype.Signed, i) - hop.genop('cli_setelem', [v_array, c_index, v_elem], ootype.Void) - return v_array - -def typeof(cliClass_or_type): - if isinstance(cliClass_or_type, ootype.StaticMethod): - FUNCTYPE = cliClass_or_type - cliClass = known_delegates[FUNCTYPE] - else: - assert isinstance(cliClass_or_type, CliClass) - cliClass = cliClass_or_type - TYPE = cliClass._INSTANCE - return PythonNet.System.Type.GetType(TYPE._assembly_qualified_name) - -def classof(cliClass_or_type): - if isinstance(cliClass_or_type, ootype.StaticMethod): - try: - FUNC = cliClass_or_type - return known_delegates_class[FUNC] - except KeyError: - cls = ootype._class(ootype.ROOT) - cls._FUNC = FUNC - known_delegates_class[FUNC] = cls - return cls - else: - assert isinstance(cliClass_or_type, CliClass) - TYPE = cliClass_or_type._INSTANCE - return ootype.runtimeClass(TYPE) - -class Entry(ExtRegistryEntry): - 
_about_ = typeof - - def compute_result_annotation(self, cliClass_s): - from rpython.translator.cli.query import get_cli_class - assert cliClass_s.is_constant() - cliType = get_cli_class('System.Type') - return SomeOOInstance(cliType._INSTANCE) - - def specialize_call(self, hop): - v_type, = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - return hop.genop('cli_typeof', [v_type], hop.r_result.lowleveltype) - - -def eventhandler(obj): - return CLR.System.EventHandler(obj) - -class Entry(ExtRegistryEntry): - _about_ = eventhandler - - def compute_result_annotation(self, s_value): - from rpython.translator.cli.query import get_cli_class - cliType = get_cli_class('System.EventHandler') - return SomeOOInstance(cliType._INSTANCE) - - def specialize_call(self, hop): - v_obj, = hop.inputargs(*hop.args_r) - methodname = hop.args_r[0].methodname - c_methodname = hop.inputconst(ootype.Void, methodname) - hop.exception_cannot_occur() - return hop.genop('cli_eventhandler', [v_obj, c_methodname], hop.r_result.lowleveltype) - - -def clidowncast(obj, TYPE): - return obj - -class Entry(ExtRegistryEntry): - _about_ = clidowncast - - def compute_result_annotation(self, s_value, s_type): - if isinstance(s_type.const, ootype.OOType): - TYPE = s_type.const - else: - cliClass = s_type.const - TYPE = cliClass._INSTANCE - assert ootype.isSubclass(TYPE, s_value.ootype) - return SomeOOInstance(TYPE) - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], annmodel.SomeOOInstance) - v_inst = hop.inputarg(hop.args_r[0], arg=0) - hop.exception_cannot_occur() - return hop.genop('oodowncast', [v_inst], resulttype = hop.r_result.lowleveltype) - - -def cliupcast(obj, TYPE): - return obj - -class Entry(ExtRegistryEntry): - _about_ = cliupcast - - def compute_result_annotation(self, s_value, s_type): - if isinstance(s_type.const, ootype.OOType): - TYPE = s_type.const - else: - cliClass = s_type.const - TYPE = cliClass._INSTANCE - assert ootype.isSubclass(s_value.ootype, TYPE) - return SomeOOInstance(TYPE) - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], annmodel.SomeOOInstance) - v_inst = hop.inputarg(hop.args_r[0], arg=0) - hop.exception_cannot_occur() - return hop.genop('ooupcast', [v_inst], resulttype = hop.r_result.lowleveltype) - - -def cast_to_native_object(obj): - raise TypeError, "cast_to_native_object is meant to be rtyped and not called direclty" - -def cast_from_native_object(obj): - raise TypeError, "cast_from_native_object is meant to be rtyped and not called direclty" - -class Entry(ExtRegistryEntry): - _about_ = cast_to_native_object - - def compute_result_annotation(self, s_value): - assert isinstance(s_value, annmodel.SomeOOObject) - assert s_value.ootype is ootype.Object - return SomeOOInstance(CLR.System.Object._INSTANCE) - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], annmodel.SomeOOObject) - v_obj, = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - return hop.genop('ooupcast', [v_obj], hop.r_result.lowleveltype) - -class Entry(ExtRegistryEntry): - _about_ = cast_from_native_object - - def compute_result_annotation(self, s_value): - assert isinstance(s_value, annmodel.SomeOOInstance) - assert s_value.ootype is CLR.System.Object._INSTANCE - return annmodel.SomeOOObject() - - def specialize_call(self, hop): - v_obj = hop.inputarg(hop.args_r[0], arg=0) - hop.exception_cannot_occur() - return hop.genop('oodowncast', [v_obj], hop.r_result.lowleveltype) - - - -from rpython.translator.cli.query import CliNamespace -CLR = 
CliNamespace(None) -CLR._buildtree() - -known_delegates = { - ootype.StaticMethod([], ootype.Signed): CLR.pypy.test.DelegateType_int__0, - ootype.StaticMethod([ootype.Signed, ootype.Float], ootype.Float): CLR.pypy.test.DelegateType_double_int_double, - ootype.StaticMethod([ootype.Float], ootype.Float): CLR.pypy.test.DelegateType_double__double_1, - ootype.StaticMethod([ootype.Bool], ootype.Bool): CLR.pypy.test.DelegateType_bool_bool_1, - ootype.StaticMethod([ootype.Char], ootype.Char): CLR.pypy.test.DelegateType_char_char_1, - ootype.StaticMethod([ootype.Signed], ootype.Void): CLR.pypy.test.DelegateType_void_int_1, - ootype.StaticMethod([ootype.Signed], ootype.Signed): CLR.pypy.test.DelegateType_int__int_1, - ootype.StaticMethod([ootype.Signed] * 2, ootype.Signed): CLR.pypy.test.DelegateType_int__int_2, - ootype.StaticMethod([ootype.Signed] * 3, ootype.Signed): CLR.pypy.test.DelegateType_int__int_3, - ootype.StaticMethod([ootype.Signed] * 5, ootype.Signed): CLR.pypy.test.DelegateType_int__int_5, - ootype.StaticMethod([ootype.Signed] * 27, ootype.Signed): CLR.pypy.test.DelegateType_int__int_27, - ootype.StaticMethod([ootype.Signed] * 100, ootype.Signed): CLR.pypy.test.DelegateType_int__int_100 - } - -known_delegates_class = {} - -cVoid = classof(CLR.System.Void) -def class2type(cls): - 'Cast a PBC of type ootype.Class into a System.Type instance' - if cls is cVoid: - return None - return clidowncast(box(cls), CLR.System.Type) - -def type2class(clitype): - 'Cast a System.Type instance to a PBC of type ootype.Class' -## if clitype is None: -## return cVoid - return unbox(clitype, ootype.Class) diff --git a/rpython/translator/cli/entrypoint.py b/rpython/translator/cli/entrypoint.py deleted file mode 100644 --- a/rpython/translator/cli/entrypoint.py +++ /dev/null @@ -1,94 +0,0 @@ -from rpython.translator.cli.cts import CTS -from rpython.translator.cli.database import LowLevelDatabase -from rpython.translator.cli.node import Node -from rpython.rtyper.ootypesystem import ootype - -def get_entrypoint(graph): - from rpython.translator.cli.test.runtest import TestEntryPoint - try: - ARG0 = graph.getargs()[0].concretetype - except IndexError: - ARG0 = None - if isinstance(ARG0, ootype.List) and ARG0.ITEM is ootype.String: - return StandaloneEntryPoint(graph) - else: - return TestEntryPoint(graph) - -class BaseEntryPoint(Node): - isnetmodule = False - - def set_db(self, db): - self.db = db - self.cts = CTS(db) - - def ilasm_flags(self): - return [] - - def output_filename(self, il_filename): - return il_filename.replace('.il', '.exe') - - -class StandaloneEntryPoint(BaseEntryPoint): - """ - This class produces a 'main' method that converts the argv in a From noreply at buildbot.pypy.org Sun Jul 7 14:22:26 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 14:22:26 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove rpython.translator.jvm Message-ID: <20130707122226.DD1681C0512@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65253:8e9fdaf6b2a3 Date: 2013-07-07 13:46 +0200 http://bitbucket.org/pypy/pypy/changeset/8e9fdaf6b2a3/ Log: Remove rpython.translator.jvm diff too long, truncating to 2000 out of 8635 lines diff --git a/rpython/translator/jvm/__init__.py b/rpython/translator/jvm/__init__.py deleted file mode 100644 diff --git a/rpython/translator/jvm/builtin.py b/rpython/translator/jvm/builtin.py deleted file mode 100644 --- a/rpython/translator/jvm/builtin.py +++ /dev/null @@ -1,171 +0,0 @@ -import rpython.translator.jvm.typesystem as jvm 
-from rpython.rtyper.ootypesystem import ootype -from rpython.translator.jvm.typesystem import \ - jInt, jVoid, jStringBuilder, jString, jPyPy, jChar, jArrayList, jObject, \ - jBool, jHashMap, jPyPyDictItemsIterator, Generifier, jCharSequence, \ - jPyPyCustomDict - -# ______________________________________________________________________ -# Mapping of built-in OOTypes to JVM types - -class JvmBuiltInType(jvm.JvmClassType): - - """ - Represents built-in types to JVM. May optionally be associated - with an OOTYPE; if it is, then we will support lookup of the OOTYPE - methods and will re-map them as needed to the JVM equivalents. - """ - - def __init__(self, db, classty, OOTYPE): - jvm.JvmClassType.__init__(self, classty.name) - self.db = db - self.OOTYPE = OOTYPE - self.gen = Generifier(OOTYPE) - - def __eq__(self, other): - return isinstance(other, JvmBuiltInType) and other.name == self.name - - def __hash__(self): - return hash(self.name) - - def lookup_field(self, fieldnm): - """ Given a field name, returns a jvm.Field object """ - _, FIELDTY = self.OOTYPE._lookup_field(fieldnm) - jfieldty = self.db.lltype_to_cts(FIELDTY) - return jvm.Field( - self.descriptor.class_name(), fieldnm, jfieldty, False) - - def lookup_method(self, methodnm): - """ Given the method name, returns a jvm.Method object """ - - # Look for a shortcut method in our table of remappings: - try: - key = (self.OOTYPE.__class__, methodnm) - return built_in_methods[key] - except KeyError: pass - - # Otherwise, determine the Method object automagically - # First, map the OOTYPE arguments and results to - # the java types they will be at runtime. Note that - # we must use the erased types for this. - ARGS, RESULT = self.gen.erased_types(methodnm) - jargtypes = [self.db.lltype_to_cts(P) for P in ARGS] - jrettype = self.db.lltype_to_cts(RESULT) - - if self.OOTYPE.__class__ in bridged_objects: - # Bridged objects are ones where we have written a java class - # that has methods with the correct names and types already - return jvm.Method.v(self, methodnm, jargtypes, jrettype) - else: - # By default, we assume it is a static method on the PyPy - # object, that takes an instance of this object as the first - # argument. The other arguments we just convert to java versions, - # except for generics. - jargtypes = [self] + jargtypes - return jvm.Method.s(jPyPy, methodnm, jargtypes, jrettype) - -# When we lookup a method on a BuiltInClassNode, we first check the -# 'built_in_methods' and 'bridged_objects' tables. This allows us to -# redirect to other methods if we like. 
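[Editorial aside, not part of the removed module: a minimal, self-contained sketch of the two-level lookup order described in the comment above. All names and string values below are hypothetical simplifications; the real tables map to jvm.Method objects rather than strings.]

    # illustrative only: consult the explicit remap table first, then fall
    # back to the generic "static helper on the PyPy support class" scheme
    remap_table = {
        ("StringBuilder", "ll_build"): "java.lang.StringBuilder.toString",
    }

    def lookup_method(ootype_name, methodnm):
        # 1. shortcut table of explicit remappings
        key = (ootype_name, methodnm)
        if key in remap_table:
            return remap_table[key]
        # 2. otherwise assume a static method on the pypy.PyPy helper class
        return "pypy.PyPy.%s" % methodnm

    assert lookup_method("StringBuilder", "ll_build") == "java.lang.StringBuilder.toString"
    assert lookup_method("List", "ll_length") == "pypy.PyPy.ll_length"
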
- -bridged_objects = ( - ootype.DictItemsIterator, - ootype.WeakReference.__class__ - ) - -built_in_methods = { - - # Note: String and StringBuilder are rebound in ootype, and thus - # .__class__ is required - - (ootype.StringBuilder.__class__, "ll_allocate"): - jvm.Method.v(jStringBuilder, "ensureCapacity", (jInt,), jVoid), - - (ootype.StringBuilder.__class__, "ll_build"): - jvm.Method.v(jStringBuilder, "toString", (), jString), - - (ootype.StringBuilder.__class__, "ll_getlength"): - jvm.Method.v(jStringBuilder, "length", (), jInt), - - (ootype.String.__class__, "ll_hash"): - jvm.Method.v(jString, "hashCode", (), jInt), - - (ootype.String.__class__, "ll_streq"): - jvm.Method.v(jString, "equals", (jObject,), jBool), - - (ootype.String.__class__, "ll_strlen"): - jvm.Method.v(jString, "length", (), jInt), - - (ootype.String.__class__, "ll_stritem_nonneg"): - jvm.Method.v(jString, "charAt", (jInt,), jChar), - - (ootype.String.__class__, "ll_startswith"): - jvm.Method.v(jString, "startsWith", (jString,), jBool), - - (ootype.String.__class__, "ll_endswith"): - jvm.Method.v(jString, "endsWith", (jString,), jBool), - - (ootype.String.__class__, "ll_strcmp"): - jvm.Method.v(jString, "compareTo", (jString,), jInt), - - (ootype.String.__class__, "ll_upper"): - jvm.Method.v(jString, "toUpperCase", (), jString), - - (ootype.String.__class__, "ll_lower"): - jvm.Method.v(jString, "toLowerCase", (), jString), - - (ootype.String.__class__, "ll_replace_chr_chr"): - jvm.Method.v(jString, "replace", (jChar, jChar), jString), - - (ootype.Dict, "ll_set"): - jvm.Method.v(jHashMap, "put", (jObject, jObject), jObject), - - (ootype.Dict, "ll_get"): - jvm.Method.v(jHashMap, "get", (jObject,), jObject), - - (ootype.Dict, "ll_contains"): - jvm.Method.v(jHashMap, "containsKey", (jObject,), jBool), - - (ootype.Dict, "ll_length"): - jvm.Method.v(jHashMap, "size", (), jInt), - - (ootype.Dict, "ll_clear"): - jvm.Method.v(jHashMap, "clear", (), jVoid), - - (ootype.CustomDict, "ll_set"): - jvm.Method.v(jPyPyCustomDict, "put", (jObject, jObject), jObject), - - (ootype.CustomDict, "ll_get"): - jvm.Method.v(jPyPyCustomDict, "get", (jObject,), jObject), - - (ootype.CustomDict, "ll_contains"): - jvm.Method.v(jPyPyCustomDict, "containsKey", (jObject,), jBool), - - (ootype.CustomDict, "ll_length"): - jvm.Method.v(jPyPyCustomDict, "size", (), jInt), - - (ootype.CustomDict, "ll_clear"): - jvm.Method.v(jPyPyCustomDict, "clear", (), jVoid), - - (ootype.List, "ll_length"): - jvm.Method.v(jArrayList, "size", (), jInt), - - (ootype.List, "ll_getitem_fast"): - jvm.Method.v(jArrayList, "get", (jInt,), jObject), - - } - -# ootype.String[Builder] and ootype.Unicode[Builder] are mapped to the -# same JVM type, so we reuse the same builtin methods also for them -def add_unicode_methods(): - mapping = { - ootype.String.__class__: ootype.Unicode.__class__, - ootype.StringBuilder.__class__: ootype.UnicodeBuilder.__class__ - } - - for (TYPE, name), value in built_in_methods.items(): - if TYPE in mapping: - TYPE = mapping[TYPE] - built_in_methods[TYPE, name] = value -add_unicode_methods() -del add_unicode_methods diff --git a/rpython/translator/jvm/cmpopcodes.py b/rpython/translator/jvm/cmpopcodes.py deleted file mode 100644 --- a/rpython/translator/jvm/cmpopcodes.py +++ /dev/null @@ -1,115 +0,0 @@ -from rpython.translator.jvm.typesystem import \ - IFLT, IFLE, IFEQ, IFNE, IFGT, IFGE, \ - IFNONNULL, IFNULL, IF_ACMPEQ, GOTO, ICONST, \ - DCONST_0, DCMPG, LCONST_0, LCMP, \ - IF_ICMPLT, IF_ICMPLE, IF_ICMPEQ, IF_ICMPNE, IF_ICMPGT, IF_ICMPGE, \ - 
PYPYUINTCMP, PYPYULONGCMP - -from rpython.translator.jvm.generator import \ - Label - -##### Branch directly as the result of a comparison - -# A table mapping the kind of numeric type to the opcode or method -# needed to compare two values of that type. The result of this -# opcode of method is that a -1, 0, or 1 is pushed on the stack, -# like the cmp() method in Python. Used to prepare the cmp_opname -# table below. -cmp_numeric_prep = { - 'uint': PYPYUINTCMP, - 'float': DCMPG, - 'llong': LCMP, - 'ullong': PYPYULONGCMP, - } - -# A table mapping the kind of comparison to the opcode which -# performs that comparison and then branches. Used to prepare the -# cmp_opname table below. -cmp_numeric_branch = { - 'lt': IFLT, - 'le': IFLE, - 'eq': IFEQ, - 'ne': IFNE, - 'gt': IFGT, - 'ge': IFGE, - } - -# A table that maps an opname to a set of instructions for -# performing a comparison. Some entries are inserted -# automatically, either because they do not fit the numeric -# pattern or are exceptions, and others are created from the -# cmp_numeric_{prep,branch} tables above. In all cases, the -# instructions are a list of opcode/functions which will be -# emitted. The last one must be a branching instruction. -cmp_opname = { - # Single operand entries: - 'bool_not': [IFEQ], - 'int_is_true': [IFNE], - 'uint_is_true': [IFNE], - 'float_is_true': [DCONST_0, DCMPG, IFNE], - 'llong_is_true': [LCONST_0, LCMP, IFNE], - 'ullong_is_true': [LCONST_0, LCMP, IFNE], - - # Double operand entries: - 'oononnull': [IFNONNULL], - 'ooisnull': [IFNULL], - 'oois': [IF_ACMPEQ], - - 'unichar_eq': [IF_ICMPEQ], - 'unichar_ne': [IF_ICMPNE], - - 'char_eq': [IF_ICMPEQ], - 'char_ne': [IF_ICMPNE], - 'char_lt': [IF_ICMPLT], - 'char_le': [IF_ICMPLE], - 'char_gt': [IF_ICMPGT], - 'char_ge': [IF_ICMPGE], - - 'int_eq': [IF_ICMPEQ], - 'int_ne': [IF_ICMPNE], - 'int_lt': [IF_ICMPLT], - 'int_le': [IF_ICMPLE], - 'int_gt': [IF_ICMPGT], - 'int_ge': [IF_ICMPGE], - - 'int_eq_ovf': [IF_ICMPEQ], - 'int_ne_ovf': [IF_ICMPNE], - 'int_lt_ovf': [IF_ICMPLT], - 'int_le_ovf': [IF_ICMPLE], - 'int_gt_ovf': [IF_ICMPGT], - 'int_ge_ovf': [IF_ICMPGE], - - 'uint_eq': [IF_ICMPEQ], - 'uint_ne': [IF_ICMPNE], - } - -# fill in the default entries like uint_lt, llong_eq: -for (prepnm, prepfn) in cmp_numeric_prep.items(): - for (brnm, brfn) in cmp_numeric_branch.items(): - opname = "%s_%s" % (prepnm, brnm) - if opname not in cmp_opname: - cmp_opname[opname] = [prepfn, brfn] - -def can_branch_directly(opname): - """ - Returns true if opname is the kind of instruction where we can - branch directly based on its result without storing it into a - variable anywhere. For example, int_lt is such an instruction. - This is used to optimize away intermediate boolean values, which - otherwise force us to generate very inefficient and hard-to-read - code. - """ - return opname in cmp_opname - -def branch_if(gen, opname, truelabel): - """ - Branches to 'truelabel' if 'opname' would return true. - 'opname' must be something like int_lt, float_ne, etc, - as determined by can_branch_directly(). 
- """ - assert can_branch_directly(opname) - assert isinstance(truelabel, Label) - instrs = cmp_opname[opname] - for i in instrs[:-1]: gen.emit(i) - gen.emit(instrs[-1], truelabel) - diff --git a/rpython/translator/jvm/conftest.py b/rpython/translator/jvm/conftest.py deleted file mode 100644 --- a/rpython/translator/jvm/conftest.py +++ /dev/null @@ -1,28 +0,0 @@ - -import py, sys - -def pytest_runtest_setup(item): - if sys.maxint > 2147483647: # 64bit platform - py.test.skip("jvm backend on 64bit unsupported") - -def pytest_addoption(parser): - group = parser.getgroup("pypy-jvm options") - group.addoption('--java', action='store', dest='java', default='java', - help='Define the java executable to use') - group.addoption('--javac', action='store', dest='javac', - default='javac', - help='Define the javac executable to use') - group.addoption('--jasmin', action='store', dest='java', default='java', - help='Define the jasmin script to use') - group.addoption('--noassemble', action='store_true', dest="noasm", - default=False, - help="don't assemble jasmin files") - group.addoption('--package', action='store', dest='package', - default='pypy', - help='Package to output generated classes into') - - group.addoption('--byte-arrays', action='store_true', - dest='byte-arrays', - default=False, - help='Use byte arrays rather than native strings') - diff --git a/rpython/translator/jvm/constant.py b/rpython/translator/jvm/constant.py deleted file mode 100644 --- a/rpython/translator/jvm/constant.py +++ /dev/null @@ -1,207 +0,0 @@ -from rpython.rtyper.ootypesystem import ootype -from rpython.flowspace import model as flowmodel -import rpython.translator.jvm.typesystem as jvm -from rpython.translator.jvm.typesystem import \ - jVoid, Method, Field -from rpython.translator.oosupport.constant import \ - BaseConstantGenerator, RecordConst, InstanceConst, ClassConst, \ - StaticMethodConst, CustomDictConst, WeakRefConst, push_constant, \ - MAX_CONST_PER_STEP - -jPyPyConstantInit = jvm.JvmClassType('pypy.ConstantInit') -jPyPyConstantInitMethod = Method.s(jPyPyConstantInit, 'init', [], jVoid) - -# ___________________________________________________________________________ -# Constant Generator - -class JVMConstantGenerator(BaseConstantGenerator): - - MAX_INSTRUCTION_COUNT = 20000 - - def __init__(self, db): - BaseConstantGenerator.__init__(self, db) - self.num_constants = 0 - self.ccs = [] - - def runtime_init(self, gen): - """ - Called from node.EntryPoint to generate code that initializes - all of the constants. Right now, this invokes a known static - method, but this should probably be changed eventually. - """ - gen.emit(jPyPyConstantInitMethod) - - # _________________________________________________________________ - # Constant Operations - # - # We store constants in static fields of a constant class; we - # generate new constant classes every MAX_CONST_PER_STEP constants - # to avoid any particular class getting too big. - - def _init_constant(self, const): - # Determine the Java type of the constant: some constants - # (weakrefs) do not have an OOTYPE, so if it returns None use - # jtype() - JFIELDOOTY = const.OOTYPE() - if not JFIELDOOTY: jfieldty = const.jtype() - else: jfieldty = self.db.lltype_to_cts(JFIELDOOTY) - - # Every MAX_CONST_PER_STEP constants, we create a new class. - # This prevents any one class from getting too big. 
- if (self.num_constants % MAX_CONST_PER_STEP) == 0: - cc_num = len(self.ccs) - self.ccs.append(jvm.JvmClassType('pypy.Constant_%d' % cc_num)) - self.num_constants += 1 - - const.fieldobj = Field(self.ccs[-1].name, const.name, jfieldty, True) - - def push_constant(self, gen, const): - const.fieldobj.load(gen) - - def _store_constant(self, gen, const): - const.fieldobj.store(gen) - - # _________________________________________________________________ - # Constant Generation - # - # The JVM constants are generated as follows: - # - # First, a set of classes are used as simple structs with static - # fields that store each constant. These class names have already - # been generated, and they are stored in the member array self.ccs. - # Therefore, the first thing we do is to generate these classes - # by iterating over all constants and declaring their fields. - # - # We then generate initialization code the constants in a SEPARATE - # set of classes, named pypy.ConstantInit_NNN. We generate one such - # class for each "step" of the underlying BaseConstantGenerator. - # - # Note that, in this setup, we cannot rely on the JVM's class init - # to initialize our constants for us: instead, we generate a static - # method (jPyPyConstantInitMethod) in _end_gen_constants() that - # invokes each of the ConstantInit_NNN's methods. - # - # Normally, these static field classes and the initialization - # code are stored together. The JVM stores them seperately, - # because otherwise it is quite hard to ensure that (a) the - # constants are initialized in the right order, and (b) all of - # the constant declarations are emitted when they are needed. - - def gen_constants(self, ilasm): - self.step_classes = [] - - # First, create the classes that store the static fields. 
- constants_by_cls = {} - for const in self.cache.values(): - try: - constants_by_cls[const.fieldobj.class_name].append(const) - except KeyError: - constants_by_cls[const.fieldobj.class_name] = [const] - for cc in self.ccs: - ilasm.begin_class(cc, jvm.jObject) - for const in constants_by_cls[cc.name]: - ilasm.add_field(const.fieldobj) - ilasm.end_class() - - # Now, delegate to the normal code for the rest - super(JVMConstantGenerator, self).gen_constants(ilasm) - - def _begin_gen_constants(self, gen, all_constants): - return gen - - def _declare_const(self, gen, const): - # in JVM, this is done first, in gen_constants() - return - - def _consider_split_current_function(self, gen): - if gen.get_instruction_count() >= self.MAX_INSTRUCTION_COUNT: - const = self.current_const - gen.pop(const.value._TYPE) - self._new_step(gen) - self._push_constant_during_init(gen, const) - - def _declare_step(self, gen, stepnum): - self.step_classes.append(jvm.JvmClassType( - 'pypy.ConstantInit_%d' % stepnum)) - gen.begin_class(self.step_classes[-1], jvm.jObject) - gen.begin_function('constant_init', [], [], jVoid, True) - - def _close_step(self, gen, stepnum): - gen.return_val(jVoid) - gen.end_function() # end constant_init() - gen.end_class() # end pypy.ConstantInit_NNN - - def _end_gen_constants(self, gen, numsteps): - gen.begin_class(jPyPyConstantInit, jvm.jObject) - gen.begin_j_function(jPyPyConstantInit, jPyPyConstantInitMethod) - for cls in self.step_classes: - m = Method.s(cls, "constant_init", [], jVoid) - gen.emit(m) - gen.return_val(jVoid) - gen.end_function() - - gen.end_class() - -class JVMStaticMethodConst(StaticMethodConst): - - def record_dependencies(self): - if self.value is ootype.null(self.value._TYPE): - self.delegate_impl = None - return - StaticMethodConst.record_dependencies(self) - self.delegate_impl = self.db.record_delegate_standalone_func_impl( - self.value.graph) - - def create_pointer(self, gen): - if self.delegate_impl: - gen.new_with_jtype(self.delegate_impl) - else: - gen.push_null(jvm.jObject) - - def initialize_data(self, constgen, gen): - return - -class JVMCustomDictConst(CustomDictConst): - - def record_dependencies(self): - # Near as I can tell, self.value is an ootype._custom_dict, - # key_eq is a Python function and graph is, well, a method - # graph that seems to be added to the function pointer - # somewhere. 
Adapted from cli/constant.py - if self.value is ootype.null(self.value._TYPE): - return - self.eq_jcls = self.db.record_delegate_standalone_func_impl( - self.value._dict.key_eq.graph) - self.hash_jcls = self.db.record_delegate_standalone_func_impl( - self.value._dict.key_hash.graph) - - CustomDictConst.record_dependencies(self) - - def create_pointer(self, gen): - gen.new_with_jtype(self.eq_jcls) - gen.new_with_jtype(self.hash_jcls) - gen.emit(jvm.CUSTOMDICTMAKE) - -class JVMWeakRefConst(WeakRefConst): - - # Ensure that weak refs are initialized last: - PRIORITY = 200 - - def jtype(self): - return jvm.jPyPyWeakRef - - def create_pointer(self, gen): - if not self.value: - TYPE = ootype.ROOT - gen.push_null(TYPE) - else: - TYPE = self.value._TYPE - push_constant(self.db, self.value._TYPE, self.value, gen) - gen.create_weakref(TYPE) - - def initialize_data(self, constgen, gen): - gen.pop(ootype.ROOT) - return True - - - diff --git a/rpython/translator/jvm/database.py b/rpython/translator/jvm/database.py deleted file mode 100644 --- a/rpython/translator/jvm/database.py +++ /dev/null @@ -1,512 +0,0 @@ -""" -The database centralizes information about the state of our translation, -and the mapping between the OOTypeSystem and the Java type system. -""" - -from cStringIO import StringIO -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper.ootypesystem import ootype, rclass -from rpython.rtyper.ootypesystem.module import ll_os -from rpython.translator.jvm import node, methods -from rpython.translator.jvm.option import getoption -from rpython.translator.jvm.builtin import JvmBuiltInType -from rpython.translator.oosupport.database import Database as OODatabase -from rpython.annotator.signature import annotation -from rpython.annotator.model import annotation_to_lltype -import rpython.translator.jvm.constant as jvmconst -import rpython.translator.jvm.typesystem as jvm - -# ______________________________________________________________________ -# Database object - -class Database(OODatabase): - def __init__(self, genoo): - OODatabase.__init__(self, genoo) - - # Private attributes: - self._jasmin_files = [] # list of strings --- .j files we made - self._classes = {} # Maps ootype class objects to node.Class objects, - # and JvmType objects as well - self._functions = {} # graph -> jvm.Method - - # (jargtypes, jrettype) -> node.StaticMethodInterface - self._delegates = {} - - # (INSTANCE, method_name) -> node.StaticMethodImplementation - self._bound_methods = {} - - self._function_names = {} # graph --> function_name - - self._constants = {} # flowmodel.Variable --> jvm.Const - - # Create information about the Main class we will build: - # - # It will have two static fields, 'ilink' and 'pypy'. The - # first points to an instance of the interface pypy.Interlink - # which we will be generated. The second points to an instance - # of pypy.PyPy which was created with this Interlink instance. - # - # The Interlink class provides the bridge between static helper - # code and dynamically generated classes. Since there is one - # Main per set of translated code, this also allows multiple - # PyPy interpreters to overlap with one another. - # - # These are public attributes that are referenced from - # elsewhere in the code using - # jvm.Generator.push_interlink() and .push_pypy(). 
- self.jPyPyMain = jvm.JvmClassType(self._pkg('Main')) - self.pypy_field = jvm.Field.s(self.jPyPyMain, 'pypy', jvm.jPyPy) - self.interlink_field = jvm.Field.s(self.jPyPyMain, 'ilink', - jvm.jPyPyInterlink) - - # _________________________________________________________________ - # Java String vs Byte Array - # - # We allow the user to configure whether Python strings are stored - # as Java strings, or as byte arrays. The latter saves space; the - # former may be faster. - - using_byte_array = False - - # XXX have to fill this in - - # _________________________________________________________________ - # Miscellaneous - - def _uniq(self, nm): - return nm + "_" + str(self.unique()) - - def _pkg(self, nm): - return "%s.%s" % (getoption('package'), nm) - - def class_name(self, TYPE): - jtype = self.lltype_to_cts(TYPE) - assert isinstance(jtype, jvm.JvmClassType) - return jtype.name - - def add_jasmin_file(self, jfile): - """ Adds to the list of files we need to run jasmin on """ - self._jasmin_files.append(jfile) - - def jasmin_files(self): - """ Returns list of files we need to run jasmin on """ - return self._jasmin_files - - # _________________________________________________________________ - # Node Creation - # - # Creates nodes that represents classes, functions, simple constants. - - def create_interlink_node(self, methods): - """ This is invoked by create_interlinke_node() in - jvm/prebuiltnodes.py. It creates a Class node that will - be an instance of the Interlink interface, which is used - to allow the static java code to throw PyPy exceptions and the - like. - - The 'methods' argument should be a dictionary whose keys are - method names and whose entries are jvm.Method objects which - the corresponding method should invoke. """ - - nm = self._pkg(self._uniq('InterlinkImplementation')) - cls = node.Class(nm, supercls=jvm.jObject) - for method_name, helper in methods.items(): - cls.add_method(node.InterlinkFunction(cls, method_name, helper)) - cls.add_interface(jvm.jPyPyInterlink) - self.jInterlinkImplementation = cls - self.pending_node(cls) - - def types_for_graph(self, graph): - """ - Given a graph, returns a tuple like so: - ( (java argument types...), java return type ) - For example, if the graph took two strings and returned a bool, - then the return would be: - ( (jString, jString), jBool ) - """ - ARGS = [v.concretetype for v in graph.getargs()] - RESULT = graph.getreturnvar().concretetype - return self.types_for_signature(ARGS, RESULT) - - def types_for_signature(self, ARGS, RESULT): - ARGS = [ARG for ARG in ARGS if ARG is not ootype.Void] - jargtypes = tuple([self.lltype_to_cts(ARG) for ARG in ARGS]) - jrettype = self.lltype_to_cts(RESULT) - return jargtypes, jrettype - - def _function_for_graph(self, classobj, funcnm, is_static, graph): - - """ - Creates a node.Function object for a particular graph. Adds - the method to 'classobj', which should be a node.Class object. 
- """ - jargtypes, jrettype = self.types_for_graph(graph) - funcobj = node.GraphFunction( - self, classobj, funcnm, jargtypes, jrettype, graph, is_static) - return funcobj - - def _translate_record(self, OOTYPE): - assert OOTYPE is not ootype.ROOT - - # Create class object if it does not already exist: - if OOTYPE in self._classes: - return self._classes[OOTYPE] - - # Create the class object first - clsnm = self._pkg(self._uniq('Record')) - clsobj = node.Class(clsnm, jvm.jObject) - self._classes[OOTYPE] = clsobj - - # Add fields: - self._translate_class_fields(clsobj, OOTYPE) - - # generate toString - dump_method = methods.RecordDumpMethod(self, OOTYPE, clsobj) - clsobj.add_method(dump_method) - - # generate equals and hash - equals_method = methods.DeepEqualsMethod(self, OOTYPE, clsobj) - clsobj.add_method(equals_method) - hash_method = methods.DeepHashMethod(self, OOTYPE, clsobj) - clsobj.add_method(hash_method) - - self.pending_node(clsobj) - return clsobj - - def _translate_superclass_of(self, OOSUB): - """ - Invoked to translate OOSUB's super class. Normally just invokes - pending_class, but we in the case of exceptions.Exception we - need to return Throwable to please the JVM. - """ - OOSUPER = OOSUB._superclass - if OOSUB._name == "exceptions.Exception": - return jvm.jPyPyThrowable - return self.pending_class(OOSUPER) - - def _translate_instance(self, OOTYPE): - assert isinstance(OOTYPE, ootype.Instance) - assert OOTYPE is not ootype.ROOT - - # Create class object if it does not already exist: - if OOTYPE in self._classes: - return self._classes[OOTYPE] - - # Create the class object first - clsnm = self._pkg(self._uniq(OOTYPE._name)) - clsobj = node.Class(clsnm) - self._classes[OOTYPE] = clsobj - - # Resolve super class - assert OOTYPE._superclass - supercls = self._translate_superclass_of(OOTYPE) - clsobj.set_super_class(supercls) - - # TODO --- mangle field and method names? Must be - # deterministic, or use hashtable to avoid conflicts between - # classes? - - # Add fields: - self._translate_class_fields(clsobj, OOTYPE) - - # Add methods: - for mname, mimpl in OOTYPE._methods.iteritems(): - if not hasattr(mimpl, 'graph'): - # Abstract method - METH = mimpl._TYPE - arglist = [self.lltype_to_cts(ARG) for ARG in METH.ARGS - if ARG is not ootype.Void] - returntype = self.lltype_to_cts(METH.RESULT) - clsobj.add_abstract_method(jvm.Method.v( - clsobj, mname, arglist, returntype)) - else: - # if the first argument's type is not a supertype of - # this class it means that this method this method is - # not really used by the class: don't render it, else - # there would be a type mismatch. 
- args = mimpl.graph.getargs() - SELF = args[0].concretetype - if not ootype.isSubclass(OOTYPE, SELF): continue - mobj = self._function_for_graph( - clsobj, mname, False, mimpl.graph) - # XXX: this logic is broken: it might happen that there are - # ootype.Instance which contains a meth whose graph is exactly - # the same as the meth in the superclass: in this case, - # len(graphs) == 1 but we cannot just mark the method as final - # (or we can, but we should avoid to emit the method in the - # subclass, then) - ## graphs = OOTYPE._lookup_graphs(mname) - ## if len(graphs) == 1: - ## mobj.is_final = True - clsobj.add_method(mobj) - - # currently, we always include a special "dump" method for debugging - # purposes - dump_method = node.InstanceDumpMethod(self, OOTYPE, clsobj) - clsobj.add_method(dump_method) - - self.pending_node(clsobj) - return clsobj - - def _translate_class_fields(self, clsobj, OOTYPE): - for fieldnm, (FIELDOOTY, fielddef) in OOTYPE._fields.iteritems(): - if FIELDOOTY is ootype.Void: continue - fieldty = self.lltype_to_cts(FIELDOOTY) - clsobj.add_field( - jvm.Field(clsobj.name, fieldnm, fieldty, False, FIELDOOTY), - fielddef) - - def pending_class(self, OOTYPE): - return self.lltype_to_cts(OOTYPE) - - def pending_function(self, graph): - """ - This is invoked when a standalone function is to be compiled. - It creates a class named after the function with a single - method, invoke(). This class is added to the worklist. - Returns a jvm.Method object that allows this function to be - invoked. - """ - if graph in self._functions: - return self._functions[graph] - classnm = self._pkg(self._uniq(graph.name)) - classobj = node.Class(classnm, self.pending_class(ootype.ROOT)) - funcobj = self._function_for_graph(classobj, "invoke", True, graph) - classobj.add_method(funcobj) - self.pending_node(classobj) - res = self._functions[graph] = funcobj.method() - return res - - def record_delegate(self, TYPE): - """ - Creates and returns a StaticMethodInterface type; this type - represents an abstract base class for functions with a given - signature, represented by TYPE, a ootype.StaticMethod - instance. - """ - - # Translate argument/return types into java types, check if - # we already have such a delegate: - jargs = tuple([self.lltype_to_cts(ARG) for ARG in TYPE.ARGS - if ARG is not ootype.Void]) - jret = self.lltype_to_cts(TYPE.RESULT) - return self.record_delegate_sig(jargs, jret) - - def record_delegate_sig(self, jargs, jret): - """ - Like record_delegate, but the signature is in terms of java - types. jargs is a list of JvmTypes, one for each argument, - and jret is a Jvm. Note that jargs does NOT include an - entry for the this pointer of the resulting object. - """ - key = (jargs, jret) - if key in self._delegates: - return self._delegates[key] - - # TODO: Make an intelligent name for this interface by - # mangling the list of parameters - name = self._pkg(self._uniq('Delegate')) - - # Create a new one if we do not: - interface = node.StaticMethodInterface(name, jargs, jret) - self._delegates[key] = interface - self.pending_node(interface) - return interface - - def record_delegate_standalone_func_impl(self, graph): - """ - Creates a class with an invoke() method that invokes the given - graph. This object can be used as a function pointer. It - will extend the appropriate delegate for the graph's - signature. 
- """ - jargtypes, jrettype = self.types_for_graph(graph) - super_class = self.record_delegate_sig(jargtypes, jrettype) - pfunc = self.pending_function(graph) - implnm = self._pkg(self._uniq(graph.name+'_delegate')) - n = node.StaticMethodImplementation(implnm, super_class, None, pfunc) - self.pending_node(n) - return n - - def record_delegate_bound_method_impl(self, INSTANCE, method_name): - """ - Creates an object with an invoke() method which invokes - a method named method_name on an instance of INSTANCE. - """ - key = (INSTANCE, method_name) - if key in self._bound_methods: - return self._bound_methods[key] - METH_TYPE = INSTANCE._lookup(method_name)[1]._TYPE - super_class = self.record_delegate(METH_TYPE) - self_class = self.lltype_to_cts(INSTANCE) - mthd_obj = self_class.lookup_method(method_name) - implnm = self._pkg(self._uniq( - self_class.simple_name()+"_"+method_name+"_delegate")) - n = self._bound_methods[key] = node.StaticMethodImplementation( - implnm, super_class, self_class, mthd_obj) - self.pending_node(n) - return n - - # _________________________________________________________________ - # toString functions - # - # Obtains an appropriate method for serializing an object of - # any type. - - _toString_methods = { - ootype.Signed:jvm.INTTOSTRINGI, - ootype.Unsigned:jvm.PYPYSERIALIZEUINT, - ootype.SignedLongLong:jvm.LONGTOSTRINGL, - ootype.UnsignedLongLong: jvm.PYPYSERIALIZEULONG, - ootype.Float:jvm.PYPYSERIALIZEDOUBLE, - ootype.Bool:jvm.PYPYSERIALIZEBOOLEAN, - ootype.Void:jvm.PYPYSERIALIZEVOID, - ootype.Char:jvm.PYPYESCAPEDCHAR, - ootype.UniChar:jvm.PYPYESCAPEDUNICHAR, - ootype.String:jvm.PYPYESCAPEDSTRING, - ootype.Unicode:jvm.PYPYESCAPEDUNICODE, - rffi.SHORT:jvm.SHORTTOSTRINGS, - } - - def toString_method_for_ootype(self, OOTYPE): - """ - Assuming than an instance of type OOTYPE is pushed on the - stack, returns a Method object that you can invoke. This method - will return a string representing the contents of that type. - - Do something like: - - > gen.load(var) - > mthd = db.toString_method_for_ootype(var.concretetype) - > mthd.invoke(gen) - - to print the value of 'var'. - """ - return self._toString_methods.get(OOTYPE, jvm.PYPYSERIALIZEOBJECT) - - # _________________________________________________________________ - # Type translation functions - # - # Functions which translate from OOTypes to JvmType instances. - # FIX --- JvmType and their Class nodes should not be different. - - def escape_name(self, nm): - # invoked by oosupport/function.py; our names don't need escaping? 
- return nm - - def llvar_to_cts(self, llv): - """ Returns a tuple (JvmType, str) with the translated type - and name of the given variable""" - return self.lltype_to_cts(llv.concretetype), llv.name - - # Dictionary for scalar types; in this case, if we see the key, we - # will return the value - ootype_to_scalar = { - ootype.Void: jvm.jVoid, - ootype.Signed: jvm.jInt, - ootype.Unsigned: jvm.jInt, - ootype.SignedLongLong: jvm.jLong, - ootype.UnsignedLongLong: jvm.jLong, - ootype.Bool: jvm.jBool, - ootype.Float: jvm.jDouble, - ootype.Char: jvm.jChar, # byte would be sufficient, but harder - ootype.UniChar: jvm.jChar, - ootype.Class: jvm.jClass, - ootype.ROOT: jvm.jObject, # treat like a scalar - rffi.SHORT: jvm.jShort, - } - - # Dictionary for non-scalar types; in this case, if we see the key, we - # will return a JvmBuiltInType based on the value - ootype_to_builtin = { - ootype.String: jvm.jString, - ootype.Unicode: jvm.jString, - ootype.StringBuilder: jvm.jStringBuilder, - ootype.UnicodeBuilder: jvm.jStringBuilder, - ootype.List: jvm.jArrayList, - ootype.Dict: jvm.jHashMap, - ootype.DictItemsIterator:jvm.jPyPyDictItemsIterator, - ootype.CustomDict: jvm.jPyPyCustomDict, - ootype.WeakReference: jvm.jPyPyWeakRef, - ll_os.STAT_RESULT: jvm.jPyPyStatResult, - - # These are some configured records that are generated by Java - # code. - #ootype.Record({"item0": ootype.Signed, "item1": ootype.Signed}): - #jvm.jPyPyRecordSignedSigned, - #ootype.Record({"item0": ootype.Float, "item1": ootype.Signed}): - #jvm.jPyPyRecordFloatSigned, - #ootype.Record({"item0": ootype.Float, "item1": ootype.Float}): - #jvm.jPyPyRecordFloatFloat, - #ootype.Record({"item0": ootype.String, "item1": ootype.String}): - #jvm.jPyPyRecordStringString, - } - - def lltype_to_cts(self, OOT): - import sys - res = self._lltype_to_cts(OOT) - return res - - def _lltype_to_cts(self, OOT): - """ Returns an instance of JvmType corresponding to - the given OOType """ - - # Handle built-in types: - if OOT in self.ootype_to_scalar: - return self.ootype_to_scalar[OOT] - if (isinstance(OOT, lltype.Ptr) and - isinstance(OOT.TO, lltype.OpaqueType)): - return jvm.jObject - if OOT in self.ootype_to_builtin: - return JvmBuiltInType(self, self.ootype_to_builtin[OOT], OOT) - if isinstance(OOT, ootype.Array): - return self._array_type(OOT.ITEM) - if OOT.__class__ in self.ootype_to_builtin: - return JvmBuiltInType( - self, self.ootype_to_builtin[OOT.__class__], OOT) - - # Handle non-built-in-types: - if isinstance(OOT, ootype.Instance): - return self._translate_instance(OOT) - if isinstance(OOT, ootype.Record): - return self._translate_record(OOT) - if isinstance(OOT, ootype.StaticMethod): - return self.record_delegate(OOT) - if OOT is ootype.Object: - return jvm.jObject - - assert False, "Untranslatable type %s!" % OOT - - ooitemtype_to_array = { - ootype.Signed : jvm.jIntArray, - ootype.Unsigned : jvm.jIntArray, - ootype.Char : jvm.jCharArray, - ootype.Bool : jvm.jBoolArray, - ootype.UniChar : jvm.jCharArray, - ootype.String : jvm.jStringArray, - ootype.Float : jvm.jDoubleArray, - ootype.Void : jvm.jVoidArray, - } - - def _array_type(self, ITEM): - if ITEM in self.ooitemtype_to_array: - return self.ooitemtype_to_array[ITEM] - return jvm.jObjectArray - - def annotation_to_cts(self, _tp): - s_tp = annotation(_tp) - TP = annotation_to_lltype(s_tp) - return self.lltype_to_cts(TP) - - # _________________________________________________________________ - # Uh.... 
- # - # These functions are invoked by the code in oosupport, but I - # don't think we need them or use them otherwise. - - def record_function(self, graph, name): - self._function_names[graph] = name - - def graph_name(self, graph): - # XXX: graph name are not guaranteed to be unique - return self._function_names.get(graph, None) diff --git a/rpython/translator/jvm/generator.py b/rpython/translator/jvm/generator.py deleted file mode 100644 --- a/rpython/translator/jvm/generator.py +++ /dev/null @@ -1,946 +0,0 @@ -try: - import pycrash - mypycrash = pycrash.PyCrash({'AppName': 'genjvm'}) -except ImportError: - mypycrash = None - -from rpython.flowspace import model as flowmodel -from rpython.translator.oosupport.metavm import Generator -from rpython.translator.oosupport.treebuilder import SubOperation -from rpython.translator.oosupport.function import render_sub_op -from rpython.rtyper.ootypesystem import ootype -from rpython.rlib.objectmodel import CDefinedIntSymbolic -from rpython.rlib.rfloat import isnan, isinf -from rpython.translator.oosupport.constant import push_constant -import rpython.translator.jvm.typesystem as jvm - -# Load a few commonly used names, but prefer to use 'jvm.Name' -from rpython.translator.jvm.typesystem import \ - jPyPy, jString, jInt, jVoid - -# ___________________________________________________________________________ -# Labels -# -# We use a class here just for sanity checks and debugging print-outs. - -class Label(object): - - def __init__(self, number, desc): - """ number is a unique integer - desc is a short, descriptive string that is a valid java identifier """ - self.label = "%s_%s" % (desc, number) - - def __repr__(self): - return "Label(%s)"%self.label - - def jasmin_syntax(self): - return self.label - -# ___________________________________________________________________________ -# Generator State - -class ClassState(object): - """ When you invoked begin_class(), one of these objects is allocated - and tracks the state as we go through the definition process. """ - def __init__(self, classty, superclassty): - self.class_type = classty - self.superclass_type = superclassty - self.line_number = 1 - def out(self, arg): - self.file.write(arg) - self.line_number += arg.count("\n") - -class FunctionState(object): - """ When you invoked begin_function(), one of these objects is allocated - and tracks the state as we go through the definition process. """ - def __init__(self): - self.next_offset = 0 - self.local_vars = {} - self.function_arguments = [] - self.instr_counter = 0 - def add_var(self, jvar, jtype, is_param): - """ Adds new entry for variable 'jvar', of java type 'jtype' """ - idx = self.next_offset - self.next_offset += jtype.descriptor.type_width() - if jvar: - assert jvar.name not in self.local_vars # never been added before - self.local_vars[jvar.name] = (idx, jtype) - if is_param: - self.function_arguments.append((jtype, idx)) - return idx - def var_offset(self, jvar, jtype): - """ Returns offset for variable 'jvar', of java type 'jtype' """ - if jvar.name in self.local_vars: - return self.local_vars[jvar.name][0] - return self.add_var(jvar, jtype, False) - def var_info_list(self): - var_info_list = [None] * self.next_offset - for name, (idx, jtype) in self.local_vars.items(): - var_info_list[idx] = (name, jtype) - return var_info_list - - - -# ___________________________________________________________________________ -# Generator - -class JVMGenerator(Generator): - - """ Base class for all JVM generators. 
Invokes a small set of '_' - methods which indicate which opcodes to emit; these can be - translated by a subclass into Jasmin assembly, binary output, etc. - Must be inherited from to specify a particular output format; - search for the string 'unimplemented' to find the methods that - must be overloaded. """ - - def __init__(self, db): - self.db = db - self.label_counter = 0 - self.curclass = None - self.curfunc = None - - # __________________________________________________________________ - # JVM specific methods to be overloaded by a subclass - # - # If the name does not begin with '_', it will be called from - # outside the generator. - - def begin_class(self, classty, superclsty, - abstract=False, interface=False): - """ - Begins a class declaration. Overall flow of class declaration - looks like: - - begin_class() - {implements()} - {add_field()} - begin_constructor()...end_constructor() - [begin_function()...end_function()] - end_class() - - Where items in brackets may appear anywhere from 0 to inf times. - - classty --- JvmType for the class - superclassty --- JvmType for the superclass - """ - assert not self.curclass - self.curclass = ClassState(classty, superclsty) - self._begin_class(abstract, interface) - - def end_class(self): - self._end_class() - self.curclass = None - self.curfunc = None - - def current_type(self): - """ Returns the jvm type we are currently defining. If - begin_class() has not been called, returns None. """ - return self.curclass.class_type - - def _begin_class(self, abstract, interface): - """ Main implementation of begin_class """ - raise NotImplementedError - - def _end_class(self): - """ Main implementation of end_class """ - raise NotImplementedError - - def implements(self, jinterface): - """ - Indicates that the current class implements the interface - jinterface, which should be a JvmType. - """ - raise NotImplementedError - - def add_field(self, fobj): - """ - fobj: a Field object - """ - unimplemented - - def begin_constructor(self): - """ - Emits the constructor for this class, which merely invokes the - parent constructor. - - superclsnm --- same Java name of super class as from begin_class - """ - self.begin_function("", [], [self.current_type()], jVoid) - self.load_jvm_var(self.current_type(), 0) - jmethod = jvm.Method.c(self.curclass.superclass_type, ()) - jmethod.invoke(self) - - def end_constructor(self): - self.return_val(jVoid) - self.end_function() - - def begin_j_function(self, cls_obj, method_obj, abstract=False): - """ - A convenience function that invokes begin_function() with the - appropriate arguments to define a method on class 'cls_obj' that - could be invoked with 'method_obj'. - """ - if method_obj.is_static(): def_args = [] - else: def_args = [cls_obj] - return self.begin_function(method_obj.method_name, - [], - def_args+method_obj.argument_types, - method_obj.return_type, - static=method_obj.is_static(), - abstract=abstract) - - def begin_function(self, funcname, argvars, argtypes, rettype, - static=False, abstract=False, final=False): - """ - funcname --- name of the function - argvars --- list of objects passed to load() that represent arguments; - should be in order, or () if load() will not be used - argtypes --- JvmType for each argument [INCLUDING this] - rettype --- JvmType for the return value - static --- keyword, if true then a static func is generated - final --- keyword, if true then a final method is generated - - This function also defines the scope for variables passed to - load()/store(). 
- """ - # Compute the indicates of each argument in the local variables - # for the function. Note that some arguments take up two slots - # depending on their type [this is compute by type_width()] - assert not self.curfunc - self.curfunc = FunctionState() - for idx, ty in enumerate(argtypes): - if idx < len(argvars): var = argvars[idx] - else: var = None - self.curfunc.add_var(var, ty, True) - # Prepare a map for the local variable indices we will add - # Let the subclass do the rest of the work; note that it does - # not need to know the argvars parameter, so don't pass it - self._begin_function(funcname, argtypes, rettype, static, abstract, final) - - def _begin_function(self, funcname, argtypes, rettype, static, abstract, final): - """ - Main implementation of begin_function. The begin_function() - does some generic handling of args. - """ - unimplemented - - def end_function(self): - self._end_function() - self.curfunc = None - - def _end_function(self): - unimplemented - - def mark(self, lbl): - """ Marks the point that a label indicates. """ - unimplemented - - def _instr(self, opcode, *args): - """ Emits an instruction with the given opcode and arguments. - The correct opcode and their types depends on the opcode. """ - unimplemented - - def return_val(self, jtype): - """ Returns a value from top of stack of the JvmType 'jtype' """ - self._instr(jvm.RETURN.for_type(jtype)) - - def load_class_name(self): - """ Loads the name of the *Java* class of the object on the top of - the stack as a Java string. Note that the result for a PyPy - generated class will look something like 'pypy.some.pkg.cls' """ - self.emit(jvm.OBJECTGETCLASS) - self.emit(jvm.CLASSGETNAME) - - def load_string(self, str): - """ Pushes a Java version of a Python string onto the stack. - 'str' should be a Python string encoded in UTF-8 (I think) """ - # Create an escaped version of str: - def escape(char): - if char == '"': return r'\"' - if char == '\n': return r'\n' - if char == "\\": return r'\\' - if ord(char) > 127: return r'\u%04x' % ord(char) - return char - res = ('"' + - "".join(escape(c) for c in str) + - '"') - # Use LDC to load the Java version: - # XXX --- support byte arrays here? Would be trickier! - self._instr(jvm.LDC, res) - - def load_jvm_var(self, jvartype, varidx): - """ Loads from jvm slot #varidx, which is expected to hold a value of - type vartype """ - assert varidx < self.curfunc.next_offset - if jvartype is jVoid: - return - opc = jvm.LOAD.for_type(jvartype) - self._instr(opc, varidx) - - def store_jvm_var(self, vartype, varidx): - """ Loads from jvm slot #varidx, which is expected to hold a value of - type vartype """ - self._instr(jvm.STORE.for_type(vartype), varidx) - - def load_from_array(self, elemtype): - """ Loads something from an array; the result will be of type 'elemtype' - (and hence the array is of type 'array_of(elemtype)'), where - 'elemtype' is a JvmType. Assumes that the array ref and index are - already pushed onto stack (in that order). """ - self._instr(jvm.ARRLOAD.for_type(elemtype)) - - def store_to_array(self, elemtype): - """ Stores something into an array; the result will be of type - 'elemtype' (and hence the array is of type - 'array_of(elemtype)'), where 'elemtype' is a JvmType. 
Assumes - that the array ref, index, and value are already pushed onto - stack (in that order).""" - self._instr(jvm.ARRLOAD.for_type(elemtype)) - - def unique_label(self, desc, mark=False): - """ Returns an opaque, unique label object that can be passed an - argument for branching opcodes, or the mark instruction. - - 'desc' should be a comment describing the use of the label. - It is for decorative purposes only and should be a valid C - identifier. - - 'mark' --- if True, then also calls self.mark() with the new lbl """ - res = Label(self.label_counter, desc) - self.label_counter += 1 - if mark: - self.mark(res) - return res - - def load_this_ptr(self): - """ Convenience method. Be sure you only call it from a - virtual method, not static methods. """ - self.load_jvm_var(jvm.jObject, 0) - - def load_function_argument(self, index): - """ Convenience method. Loads function argument #index; note that - the this pointer is index #0. """ - jtype, jidx = self.curfunc.function_arguments[index] - self.load_jvm_var(jtype, jidx) - - def prepare_generic_argument(self, ITEMTYPE): - jty = self.db.lltype_to_cts(ITEMTYPE) - self.prepare_generic_argument_with_jtype(jty) - - def prepare_generic_argument_with_jtype(self, jty): - if jty is jVoid: - self.emit(jvm.ACONST_NULL) - elif isinstance(jty, jvm.JvmScalarType): - self.box_value(jty) - - def prepare_generic_result(self, ITEMTYPE): - jresty = self.db.lltype_to_cts(ITEMTYPE) - self.prepare_generic_result_with_jtype(jresty) - - def prepare_generic_result_with_jtype(self, jresty): - if jresty is jVoid: - self.emit(jvm.POP) - elif isinstance(jresty, jvm.JvmScalarType): - # Perform any un-boxing required: - self.downcast_jtype(jresty.box_type) - self.unbox_value(jresty) - else: - # Perform any casting required: - self.downcast_jtype(jresty) - - def box_value(self, jscalartype): - """ Assuming that an value of type jscalartype is on the stack, - boxes it into an Object. """ - jclasstype = jscalartype.box_type - jmethod = jvm.Method.s( - jclasstype, 'valueOf', (jscalartype,), jclasstype) - self.emit(jmethod) - - def unbox_value(self, jscalartype): - """ Assuming that a boxed value of type jscalartype is on the stack, - unboxes it. """ - jclasstype = jscalartype.box_type - jmethod = jvm.Method.v( - jclasstype, jscalartype.unbox_method, (), jscalartype) - self.emit(jmethod) - - def swap(self): - """ Swaps the two words highest on the stack. """ - self.emit(jvm.SWAP) - - # __________________________________________________________________ - # Exception Handling - # - # You can demarcate regions of code as "try/catch" regions using - # the various functions included here. Either invoke - # try_catch_region(), in which case you must supply all the - # relevant labels, or use the begin_try()/end_try()/begin_catch() - # methods. In the latter case, you define the 3 needed labels as - # you go. Both begin_try() and end_try() must have been invoked - # before begin_catch() is invoked (i.e., the try region must - # appear before the corresponding catch regions). Note that - # end_try() can be called again to reset the end of the try - # region. 
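# Editor's note: the block comment above describes the begin_try()/end_try()/
# begin_catch() protocol only in prose.  Below is a minimal, self-contained
# sketch of the same label-based demarcation idea; the class and label names
# are illustrative only and are not taken from the genjvm sources.

class TryCatchSketch(object):
    def __init__(self):
        self.counter = 0
        self.lines = []

    def unique_label(self, desc, mark=False):
        label = "%s_%d" % (desc, self.counter)
        self.counter += 1
        if mark:
            self.lines.append("%s:" % label)     # "marking" == emitting the label
        return label

    def begin_try(self):
        self.begintrylbl = self.unique_label("begin_try", mark=True)

    def end_try(self):
        self.endtrylbl = self.unique_label("end_try", mark=True)

    def begin_catch(self, jexcclsty):
        # both begin_try() and end_try() must already have been called here
        catchlbl = self.unique_label("catch", mark=True)
        self.lines.append(".catch %s from %s to %s using %s" % (
            jexcclsty, self.begintrylbl, self.endtrylbl, catchlbl))

gen = TryCatchSketch()
gen.begin_try()
gen.lines.append("  ; protected instructions go here")
gen.end_try()
gen.begin_catch("java/lang/RuntimeException")
assert gen.lines[-1] == (".catch java/lang/RuntimeException "
                         "from begin_try_0 to end_try_1 using catch_2")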
- - def begin_try(self): - self.begintrylbl = self.unique_label("begin_try", mark=True) - - def end_try(self): - self.endtrylbl = self.unique_label("end_try", mark=True) - - def begin_catch(self, jexcclsty): - catchlbl = self.unique_label("catch", mark=True) - self.try_catch_region( - jexcclsty, self.begintrylbl, self.endtrylbl, catchlbl) - - def end_catch(self): - return - - def try_catch_region(self, jexcclsty, trystartlbl, tryendlbl, catchlbl): - """ - Indicates a try/catch region. - - Either invoked directly, or from the begin_catch() routine: - the latter is invoked by the oosupport code. - - 'jexcclsty' --- a JvmType for the class of exception to be caught - 'trystartlbl', 'tryendlbl' --- labels marking the beginning and end - of the try region - 'catchlbl' --- label marking beginning of catch region - """ - unimplemented - - _equals = { - ootype.Void: (None,None), - ootype.SignedLongLong: (jvm.LCMP, jvm.IFEQ), - ootype.UnsignedLongLong: (jvm.LCMP, jvm.IFEQ), - ootype.Float: (jvm.DCMPG, jvm.IFEQ), - ootype.Signed: (None,jvm.IF_ICMPNE), - ootype.Unsigned: (None,jvm.IF_ICMPNE), - ootype.Bool: (None,jvm.IF_ICMPNE), - ootype.Char: (None,jvm.IF_ICMPNE), - ootype.UniChar: (None,jvm.IF_ICMPNE), - } - def compare_values(self, OOTYPE, unequal_lbl): - """ Assumes that two instances of OOTYPE are pushed on the stack; - compares them and jumps to 'unequal_lbl' if they are unequal """ - if OOTYPE in self._equals: - i1, i2 = self._equals[OOTYPE] - if i1: self.emit(i1) - if i2: self.emit(i2, unequal_lbl) - return - self.emit(jvm.OBJEQUALS) - self.emit(jvm.IFEQ, unequal_lbl) - - _hash = { - ootype.Void: jvm.ICONST_0, - ootype.SignedLongLong: jvm.L2I, - ootype.UnsignedLongLong: jvm.L2I, - ootype.Float: jvm.D2I, - ootype.Signed: None, - ootype.Unsigned: None, - ootype.Bool: None, - ootype.Char: None, - ootype.UniChar: None, - } - def hash_value(self, OOTYPE): - """ Assumes that an instance of OOTYPE is pushed on the stack. - When finished, an int will be on the stack as a hash value. """ - if OOTYPE in self._hash: - i1 = self._hash[OOTYPE] - if i1: self.emit(i1) - return - self.emit(jvm.OBJHASHCODE) - - # __________________________________________________________________ - # Generator methods and others that are invoked by MicroInstructions - # - # These translate into calls to the above methods. 
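# Editor's note: compare_values() above is driven by the per-OOTYPE _equals
# table, with Object.equals() as the fallback for reference types.  The sketch
# below shows only that table-plus-fallback dispatch pattern; the opcode
# strings and type names are illustrative stand-ins, not the genjvm definitions.

EQUALS_SKETCH = {
    "Signed":         (None, "IF_ICMPNE"),   # ints: a single compare-and-branch
    "SignedLongLong": ("LCMP", "IFNE"),      # longs: compare first, then branch
    "Float":          ("DCMPG", "IFNE"),     # doubles: likewise
}

def compare_values_sketch(type_name, unequal_lbl, emit):
    """Emit opcodes that jump to 'unequal_lbl' when the two operands differ."""
    if type_name in EQUALS_SKETCH:
        cmp_op, branch_op = EQUALS_SKETCH[type_name]
        if cmp_op:
            emit(cmp_op)
        emit(branch_op, unequal_lbl)
        return
    # reference types: call equals(), then branch if it returned false
    emit("OBJEQUALS")
    emit("IFEQ", unequal_lbl)

emitted = []
compare_values_sketch("Float", "lbl_unequal", lambda *args: emitted.append(args))
assert emitted == [("DCMPG",), ("IFNE", "lbl_unequal")]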
- - def emit(self, instr, *args): - """ 'instr' in our case must be either a string, in which case - it is the name of a method to invoke, or an Opcode/Method - object (defined above).""" - - if instr is None: - return - - if isinstance(instr, str): - return getattr(self, instr)(*args) - - if isinstance(instr, jvm.Opcode): - return self._instr(instr, *args) - - if isinstance(instr, jvm.BaseMethod): - return instr.invoke(self) - - if isinstance(instr, jvm.Field) or isinstance(instr, jvm.Property): - return instr.load(self) - - raise Exception("Unknown object in call to emit(): "+repr(instr)) - - def _var_data(self, v): - # Determine java type: - jty = self.db.lltype_to_cts(v.concretetype) - import sys - # Determine index in stack frame slots: - # note that arguments and locals can be treated the same here - return jty, self.curfunc.var_offset(v, jty) - - def load(self, value): - if isinstance(value, flowmodel.Variable): - jty, idx = self._var_data(value) - return self.load_jvm_var(jty, idx) - - if isinstance(value, SubOperation): - return render_sub_op(value, self.db, self) - - if isinstance(value, flowmodel.Constant): - return push_constant(self.db, value.concretetype, value.value, self) - - raise Exception('Unexpected type for v in load(): '+ - repr(value.concretetype) + " v=" + repr(value)) - - def store(self, v): - # Ignore Void values - if v.concretetype is ootype.Void: - return - - if isinstance(v, flowmodel.Variable): - jty, idx = self._var_data(v) - return self.store_jvm_var(jty, idx) - raise Exception('Unexpected type for v in store(): '+v) - - def set_field(self, CONCRETETYPE, fieldname): - clsobj = self.db.pending_class(CONCRETETYPE) - fieldobj = clsobj.lookup_field(fieldname) - fieldobj.store(self) - - def push_pypy(self): - """ Pushes the PyPy object which contains all of our helper methods - onto the stack """ - self.db.pypy_field.load(self) - - def push_interlink(self): - """ Pushes the Interlink object which contains the methods - from prebuildnodes.py onto the stack """ - self.db.interlink_field.load(self) - - def get_field(self, CONCRETETYPE, fieldname): - clsobj = self.db.pending_class(CONCRETETYPE) - fieldobj = clsobj.lookup_field(fieldname) - fieldobj.load(self) - - def downcast(self, TYPE): - jtype = self.db.lltype_to_cts(TYPE) - self.downcast_jtype(jtype) - - def downcast_jtype(self, jtype): - self._instr(jvm.CHECKCAST, jtype) - - def instanceof(self, TYPE): - jtype = self.db.lltype_to_cts(TYPE) - self._instr(jvm.INSTANCEOF, jtype) - - # included for compatibility with oosupport, but instanceof_jtype - # follows our naming convention better - def isinstance(self, jtype): - return self.instanceof_jtype(jtype) - - def instanceof_jtype(self, jtype): - self._instr(jvm.INSTANCEOF, jtype) - - def branch_unconditionally(self, target_label): - self.goto(target_label) - - def branch_conditionally(self, cond, target_label): - if cond: - self._instr(jvm.IFNE, target_label) - else: - self._instr(jvm.IFEQ, target_label) - - def branch_if_equal(self, target_label): - self._instr(jvm.IF_ICMPEQ, target_label) - - def branch_if_not_equal(self, target_label): - self._instr(jvm.IF_ICMPNE, target_label) - - def call_graph(self, graph): - mthd = self.db.pending_function(graph) - mthd.invoke(self) - - def call_method(self, OOCLASS, method_name): - clsobj = self.db.pending_class(OOCLASS) - mthd = clsobj.lookup_method(method_name) - mthd.invoke(self) - - # Check if we have to convert the result type at all: - gener = jvm.Generifier(OOCLASS) - RETTYPE = gener.full_types(method_name)[1] - 
jrettype = self.db.lltype_to_cts(RETTYPE) - if jrettype != mthd.return_type: - # if the intended return type is not the same as the - # actual return type in the JVM (mthd.return_type), - # we have to "deal with it" - self.prepare_generic_result(RETTYPE) - - def prepare_call_primitive(self, op, module, name): - # Load the PyPy object pointer onto the stack: - self.push_pypy() - - # If necessary, load the ll_os object pointer instead: - if module == 'll_os': - jvm.PYPYOS.load(self) - - def call_primitive(self, op, module, name): - from rpython.translator.simplify import get_functype - callee = op.args[0].value - # it could be an rffi lltype, see test_primitive.test_rffi_ooprimitive - TYPE = get_functype(callee._TYPE) - jargtypes, jrettype = self.db.types_for_signature(TYPE.ARGS, TYPE.RESULT) - - # Determine what class the primitive is implemented in: - if module == 'll_os': - jcls = jvm.jll_os - else: - jcls = jPyPy - - # Determine the method signature: - # n.b.: if the method returns a generated type, then - # it's static type will be Object. This is because - # the method cannot directly refer to the Java type in - # .java source, as its name is not yet known. - if jrettype.is_generated(): - mthd = jvm.Method.v(jcls, name, jargtypes, jvm.jObject) - else: - mthd = jvm.Method.v(jcls, name, jargtypes, jrettype) - - # Invoke the method - self.emit(mthd) - - # Cast the result, if needed - if jrettype.is_generated(): - self.downcast_jtype(jrettype) - - def prepare_call_oostring(self, OOTYPE): - # Load the PyPy object pointer onto the stack: - self.push_pypy() - - def call_oostring(self, OOTYPE): - cts_type = self.db.lltype_to_cts(OOTYPE) - - # treat all objects the same: - if isinstance(cts_type, jvm.JvmClassType): - cts_type = jvm.jObject - - mthd = jvm.Method.v(jPyPy, 'oostring', [cts_type, jInt], jString) - self.emit(mthd) - if self.db.using_byte_array: - self.emit(jvm.PYPYSTRING2BYTES) - - def prepare_call_oounicode(self, OOTYPE): - # Load the PyPy object pointer onto the stack: - self.push_pypy() - - def call_oounicode(self, OOTYPE): - cts_type = self.db.lltype_to_cts(OOTYPE) - mthd = jvm.Method.v(jPyPy, 'oounicode', [cts_type], jString) - self.emit(mthd) - if self.db.using_byte_array: - self.emit(jvm.PYPYSTRING2BYTES) - - def new(self, TYPE): - jtype = self.db.lltype_to_cts(TYPE) - self.new_with_jtype(jtype) - - def new_with_jtype(self, jtype, ctor=None): - if ctor is None: - ctor = jvm.Method.c(jtype, ()) - self.emit(jvm.NEW, jtype) - self.emit(jvm.DUP) - self.emit(ctor) - - def oonewarray(self, TYPE, length): - jtype = self.db.lltype_to_cts(TYPE) - self.load(length) - jtype.make(self) - - def instantiate(self): - self.emit(jvm.PYPYRUNTIMENEW) - - def getclassobject(self, OOINSTANCE): - jtype = self.db.lltype_to_cts(OOINSTANCE) - self.load_string(jtype.name) - jvm.CLASSFORNAME.invoke(self) - - def dup(self, OOTYPE): - jtype = self.db.lltype_to_cts(OOTYPE) - self.dup_jtype(jtype) - - def dup_jtype(self, jtype): - if jtype.descriptor.type_width() == 1: - self.emit(jvm.DUP) - else: - self.emit(jvm.DUP2) - - def pop(self, OOTYPE): - jtype = self.db.lltype_to_cts(OOTYPE) - if jtype.descriptor.type_width() == 1: - self.emit(jvm.POP) - else: - self.emit(jvm.POP2) - - def push_null(self, OOTYPE): - self.emit(jvm.ACONST_NULL) - - # we can't assume MALLOC_ZERO_FILLED, because for scalar type the - # default item for ArrayList is null, not e.g. Integer(0) or - # Char(0). 
- DEFINED_INT_SYMBOLICS = {'MALLOC_ZERO_FILLED':0, - '0 /* we are not jitted here */': 0} - - def push_primitive_constant(self, TYPE, value): - - if TYPE is ootype.Void: - return - elif isinstance(value, CDefinedIntSymbolic): - self.emit(jvm.ICONST, self.DEFINED_INT_SYMBOLICS[value.expr]) - elif TYPE in (ootype.Bool, ootype.Signed): - self.emit(jvm.ICONST, int(value)) - elif TYPE is ootype.Unsigned: - # Converts the unsigned int into its corresponding signed value: - if value > 0x7FFFFFFF: - value = -((int(value) ^ 0xFFFFFFFF)+1) - self.emit(jvm.ICONST, value) - elif TYPE is ootype.Char or TYPE is ootype.UniChar: - self.emit(jvm.ICONST, ord(value)) - elif TYPE is ootype.SignedLongLong: - self._push_long_constant(long(value)) - elif TYPE is ootype.UnsignedLongLong: - # Converts the unsigned long into its corresponding signed value: - if value > 0x7FFFFFFFFFFFFFFF: - value = -((long(value) ^ 0xFFFFFFFFFFFFFFFF)+1) - self._push_long_constant(value) - elif TYPE is ootype.Float: - self._push_double_constant(float(value)) - elif TYPE in (ootype.String, ootype.Unicode): - if value == ootype.null(TYPE): - self.emit(jvm.ACONST_NULL) - else: - self.load_string(value._str) - else: - assert False, 'Unknown constant type: %s' % TYPE - - def _push_long_constant(self, value): - if value == 0: - self.emit(jvm.LCONST_0) - elif value == 1: - self.emit(jvm.LCONST_1) - else: - self.emit(jvm.LDC2, value) - - def _push_double_constant(self, value): - if isnan(value): - jvm.DOUBLENAN.load(self) - elif isinf(value): - if value > 0: jvm.DOUBLEPOSINF.load(self) - else: jvm.DOUBLENEGINF.load(self) - elif value == 0.0: - self.emit(jvm.DCONST_0) - elif value == 1.0: - self.emit(jvm.DCONST_1) - else: - # Big hack to avoid exponential notation: - self.emit(jvm.LDC2, "%22.22f" % value) - - def create_weakref(self, OOTYPE): - """ - After prepare_cast_ptr_to_weak_address has been called, and the - ptr to cast has been pushed, you can invoke this routine. - OOTYPE should be the type of value which was pushed. - The result will be that at the top of the stack is a weak reference. - """ - self.prepare_generic_argument(OOTYPE) - self.emit(jvm.PYPYWEAKREFCREATE) - - def deref_weakref(self, OOTYPE): - """ - If a weak ref is at the top of the stack, yields the object - that this weak ref is a pointer to. OOTYPE is the kind of object - you had a weak reference to. - """ - self.emit(jvm.PYPYWEAKREFGET) - self.prepare_generic_result(OOTYPE) - - # __________________________________________________________________ - # Methods invoked directly by strings in jvm/opcode.py - - def throw(self): - """ Throw the object from top of the stack as an exception """ - self._instr(jvm.ATHROW) - - def iabs(self): - jvm.MATHIABS.invoke(self) - - def dbl_abs(self): - jvm.MATHDABS.invoke(self) - - def bitwise_negate(self): - """ Invert all the bits in the "int" on the top of the stack """ - self._instr(jvm.ICONST, -1) - self._instr(jvm.IXOR) - - def goto(self, label): - """ Jumps unconditionally """ - self._instr(jvm.GOTO, label) - - def goto_if_true(self, label): - """ Jumps if the top of stack is true """ - self._instr(jvm.IFNE, label) - - def goto_if_false(self, label): - """ Jumps if the top of stack is false """ - self._instr(jvm.IFEQ, label) - -class JasminGenerator(JVMGenerator): - - def __init__(self, db, outdir): - JVMGenerator.__init__(self, db) - self.outdir = outdir - - def _begin_class(self, abstract, interface): - """ - Invoked by begin_class. It is expected that self.curclass will - be set when this method is invoked. 
- - abstract: True if the class to generate is abstract - - interface: True if the 'class' to generate is an interface - """ - - iclassnm = self.current_type().descriptor.int_class_name() - isuper = self.curclass.superclass_type.descriptor.int_class_name() - - jfile = self.outdir.join("%s.j" % iclassnm) - - jfile.dirpath().ensure(dir=True) - self.curclass.file = jfile.open('w') - self.db.add_jasmin_file(str(jfile)) - - # Determine the "declaration string" - if interface: decl_str = "interface" - else: decl_str = "class" - - # Write the JasminXT header - fields = ["public"] - if abstract: fields.append('abstract') - self.curclass.out(".%s %s %s\n" % ( - decl_str, " ".join(fields), iclassnm)) - self.curclass.out(".super %s\n" % isuper) - - def _end_class(self): - self.curclass.file.close() - - def close(self): - assert self.curclass is None - - def add_comment(self, comment): - if self.curclass: - self.curclass.out(" ; %s\n" % comment) - - def implements(self, jinterface): - self.curclass.out( - '.implements ' + jinterface.descriptor.int_class_name() + '\n') - - def add_field(self, fobj): - try: - fobj.jtype.descriptor - except AttributeError: - if mypycrash is not None: - mypycrash.forceDump() - mypycrash.saveToFile("/tmp/test_jvm_weakref.pycrash") - - kw = ['public'] - if fobj.is_static: kw.append('static') - self.curclass.out('.field %s %s %s\n' % ( - " ".join(kw), fobj.field_name, fobj.jtype.descriptor)) - - def _begin_function(self, funcname, argtypes, rettype, static, abstract, final): - - if not static: argtypes = argtypes[1:] - - # Throws clause? Only use RuntimeExceptions? - kw = ['public'] - if static: kw.append('static') - if abstract: kw.append('abstract') - if final: kw.append('final') - - self.curclass.out('.method %s %s(%s)%s\n' % ( - " ".join(kw), - funcname, - "".join([a.descriptor for a in argtypes]), - rettype.descriptor)) - self.abstract_method = abstract - - if not self.abstract_method: - self.function_start_label = self.unique_label( - 'function_start', True) - - def _end_function(self): - - if not self.abstract_method: - function_end_label = self.unique_label('function_end', True) - - self.curclass.out('.limit stack 100\n') # HACK, track max offset - self.curclass.out('.limit locals %d\n' % self.curfunc.next_offset) - - # Declare debug information for each variable: - var_info_list = self.curfunc.var_info_list() - for idx, data in enumerate(var_info_list): - if data: - name, jtype = data - if jtype is not jVoid: - self.curclass.out( - '.var %d is %s %s from %s to %s\n' % ( - idx, - name, - jtype.descriptor, - self.function_start_label.label, - function_end_label.label)) - - self.curclass.out('.end method\n') - - def mark(self, lbl): - """ Marks the point that a label indicates. 
""" - assert isinstance(lbl, Label) - self.curclass.out(' %s:\n' % lbl.jasmin_syntax()) - - # We count labels as instructions because ASM does: - self.curfunc.instr_counter += 1 - - def _instr(self, opcode, *args): - jvmstr, args = opcode.specialize(args) - def jasmin_syntax(arg): - if hasattr(arg, 'jasmin_syntax'): return arg.jasmin_syntax() - return str(arg) - strargs = [jasmin_syntax(arg) for arg in args] - instr_text = '%s %s' % (jvmstr, " ".join(strargs)) - self.curclass.out(' .line %d\n' % self.curclass.line_number) - self.curclass.out(' %s\n' % (instr_text,)) - self.curfunc.instr_counter+=1 - - def try_catch_region(self, jexcclsty, trystartlbl, tryendlbl, catchlbl): - self.curclass.out(' .catch %s from %s to %s using %s\n' % ( - jexcclsty.descriptor.int_class_name(), - trystartlbl.jasmin_syntax(), From noreply at buildbot.pypy.org Sun Jul 7 14:22:28 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 14:22:28 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Fix rpython/translator/backendopt Message-ID: <20130707122228.1F0FF1C0512@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65254:8fb078df2c3d Date: 2013-07-07 14:19 +0200 http://bitbucket.org/pypy/pypy/changeset/8fb078df2c3d/ Log: Fix rpython/translator/backendopt diff --git a/rpython/translator/backendopt/malloc.py b/rpython/translator/backendopt/malloc.py --- a/rpython/translator/backendopt/malloc.py +++ b/rpython/translator/backendopt/malloc.py @@ -537,85 +537,6 @@ raise AssertionError(op.opname) -class OOTypeMallocRemover(BaseMallocRemover): - - IDENTITY_OPS = ('same_as', 'ooupcast', 'oodowncast') - SUBSTRUCT_OPS = () - MALLOC_OP = 'new' - FIELD_ACCESS = dict.fromkeys(["oogetfield", - "oosetfield", - "oononnull", - "ooisnull", - #"oois", # ??? - #"instanceof", # ??? - ]) - SUBSTRUCT_ACCESS = {} - CHECK_ARRAY_INDEX = {} - - def get_STRUCT(self, TYPE): - return TYPE - - def union_wrapper(self, S): - return False - - def RTTI_dtor(self, STRUCT): - return False - - def inline_type(self, TYPE): - return isinstance(TYPE, (ootype.Record, ootype.Instance)) - - def _get_fields(self, TYPE): - if isinstance(TYPE, ootype.Record): - return TYPE._fields - elif isinstance(TYPE, ootype.Instance): - return TYPE._allfields() - else: - assert False - - def flatten(self, TYPE): - for name, (FIELDTYPE, default) in self._get_fields(TYPE).iteritems(): - key = self.key_for_field_access(TYPE, name) - constant = Constant(default) - constant.concretetype = FIELDTYPE - self.flatconstants[key] = constant - self.flatnames.append(key) - self.newvarstype[key] = FIELDTYPE - - def key_for_field_access(self, S, fldname): - CLS, TYPE = S._lookup_field(fldname) - return CLS, fldname - - def flowin_op(self, op, vars, newvarsmap): - if op.opname == "oogetfield": - S = op.args[0].concretetype - fldname = op.args[1].value - key = self.key_for_field_access(S, fldname) - newop = SpaceOperation("same_as", - [newvarsmap[key]], - op.result) - self.newops.append(newop) - elif op.opname == "oosetfield": - S = op.args[0].concretetype - fldname = op.args[1].value - key = self.key_for_field_access(S, fldname) - assert key in newvarsmap - newvarsmap[key] = op.args[2] - elif op.opname in ("same_as", "oodowncast", "ooupcast"): - vars[op.result] = True - # Consider the two pointers (input and result) as - # equivalent. We can, and indeed must, use the same - # flattened list of variables for both, as a "setfield" - # via one pointer must be reflected in the other. 
- elif op.opname in ("ooisnull", "oononnull"): - # we know the pointer is not NULL if it comes from - # a successful malloc - c = Constant(op.opname == "oononnull", lltype.Bool) - newop = SpaceOperation('same_as', [c], op.result) - self.newops.append(newop) - else: - raise AssertionError(op.opname) - - def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): if type_system == 'lltypesystem': remover = LLTypeMallocRemover(verbose) diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py --- a/rpython/translator/backendopt/test/test_all.py +++ b/rpython/translator/backendopt/test/test_all.py @@ -2,11 +2,9 @@ from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST from rpython.translator.backendopt.support import md5digest -from rpython.translator.backendopt.test.test_malloc import TestLLTypeMallocRemoval as LLTypeMallocRemovalTest -from rpython.translator.backendopt.test.test_malloc import TestOOTypeMallocRemoval as OOTypeMallocRemovalTest +from rpython.translator.backendopt.test.test_malloc import TestMallocRemoval as MallocRemovalTest from rpython.translator.translator import TranslationContext, graphof from rpython.flowspace.model import Constant, summary -from rpython.annotator import model as annmodel from rpython.rtyper.llinterp import LLInterpreter from rpython.rlib.rarithmetic import intmask from rpython.conftest import option @@ -43,8 +41,9 @@ LARGE_THRESHOLD = 10*INLINE_THRESHOLD_FOR_TEST HUGE_THRESHOLD = 100*INLINE_THRESHOLD_FOR_TEST -class BaseTester(object): - type_system = None +class TestLLType(object): + type_system = 'lltype' + check_malloc_removed = MallocRemovalTest.check_malloc_removed def translateopt(self, func, sig, **optflags): t = TranslationContext() @@ -61,7 +60,7 @@ assert big() == 83 t = self.translateopt(big, [], inline_threshold=HUGE_THRESHOLD, - mallocs=True) + mallocs=True) big_graph = graphof(t, big) self.check_malloc_removed(big_graph) @@ -128,7 +127,7 @@ return res def g(x): - return s(100) + s(1) + x + return s(100) + s(1) + x def idempotent(n1, n2): c = [i for i in range(n2)] @@ -232,10 +231,6 @@ graph = graphof(t, fn) assert "direct_call" not in summary(graph) -class TestLLType(BaseTester): - type_system = 'lltype' - check_malloc_removed = LLTypeMallocRemovalTest.check_malloc_removed - def test_list_comp(self): def f(n1, n2): c = [i for i in range(n2)] @@ -296,9 +291,3 @@ llinterp = LLInterpreter(t.rtyper) res = llinterp.eval_graph(later_graph, [10]) assert res == 1 - - -class TestOOType(BaseTester): - type_system = 'ootype' - check_malloc_removed = OOTypeMallocRemovalTest.check_malloc_removed - diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -3,16 +3,13 @@ from rpython.translator.simplify import get_funcobj from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.all import backend_optimizations -from rpython.rtyper.test.tool import LLRtypeMixin, OORtypeMixin from rpython.conftest import option -class BaseTestCanRaise(object): - type_system = None - +class TestCanRaise(object): def translate(self, func, sig): t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system=self.type_system).specialize() + 
t.buildrtyper(type_system='lltype').specialize() if option.view: t.view() return t, RaiseAnalyzer(t) @@ -136,7 +133,7 @@ obj = B() f(obj) m(obj) - + t, ra = self.translate(h, [int]) hgraph = graphof(t, h) # fiiiish :-( @@ -179,7 +176,7 @@ # an indirect call without a list of graphs from rpython.rlib.objectmodel import instantiate class A: - pass + pass class B(A): pass def g(x): @@ -195,7 +192,6 @@ result = ra.can_raise(fgraph.startblock.operations[0]) assert result -class TestLLType(LLRtypeMixin, BaseTestCanRaise): def test_llexternal(self): from rpython.rtyper.lltypesystem.rffi import llexternal from rpython.rtyper.lltypesystem import lltype @@ -231,8 +227,3 @@ fgraph = graphof(t, f) result = ra.can_raise(fgraph.startblock.operations[0]) assert not result - - -class TestOOType(OORtypeMixin, BaseTestCanRaise): - def test_can_raise_recursive(self): - py.test.skip("ootype: no explicit stack checks raising RuntimeError") diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py --- a/rpython/translator/backendopt/test/test_finalizer.py +++ b/rpython/translator/backendopt/test/test_finalizer.py @@ -10,18 +10,16 @@ from rpython.rlib import rgc -class BaseFinalizerAnalyzerTests(object): +class TestFinalizerAnalyzer(object): """ Below are typical destructors that we encounter in pypy """ - type_system = None - def analyze(self, func, sig, func_to_analyze=None, backendopt=False): if func_to_analyze is None: func_to_analyze = func t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper(type_system='lltype').specialize() if backendopt: backend_optimizations(t) if option.view: @@ -61,14 +59,10 @@ v3], None)) assert not f.analyze(SpaceOperation('bare_setfield', [v1, Constant('z'), v4], None)) - - -class TestLLType(BaseFinalizerAnalyzerTests): - type_system = 'lltype' def test_malloc(self): S = lltype.GcStruct('S') - + def f(): return lltype.malloc(S) @@ -77,7 +71,7 @@ def test_raw_free_getfield(self): S = lltype.Struct('S') - + class A(object): def __init__(self): self.x = lltype.malloc(S, flavor='raw') @@ -100,7 +94,7 @@ def g(): p = lltype.malloc(C, 3, flavor='raw') f(p) - + def f(p): c(rffi.ptradd(p, 0)) lltype.free(p, flavor='raw') @@ -112,7 +106,7 @@ class B(object): def __init__(self): self.counter = 1 - + class A(object): def __init__(self): self.x = B() @@ -137,6 +131,3 @@ pass self.analyze(g, []) # did not explode py.test.raises(FinalizerError, self.analyze, f, []) - -class TestOOType(BaseFinalizerAnalyzerTests): - type_system = 'ootype' diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py --- a/rpython/translator/backendopt/test/test_inline.py +++ b/rpython/translator/backendopt/test/test_inline.py @@ -13,7 +13,7 @@ from rpython.translator.backendopt.checkvirtual import check_virtual_methods from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.llinterp import LLInterpreter -from rpython.rtyper.test.tool import LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.rarithmetic import ovfcheck from rpython.translator.test.snippet import is_perfect_number from rpython.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST @@ -51,12 +51,8 @@ def __init__(self): self.data2 = 456 -class BaseTestInline: - type_system = None - - def _skip_oo(self, reason): - if self.type_system == 'ootype': - 
py.test.skip("ootypesystem doesn't support %s, yet" % reason) +class TestInline(BaseRtypingTest): + type_system = 'lltype' def translate(self, func, argtypes): t = TranslationContext() @@ -119,7 +115,7 @@ if remove_same_as: for graph in t.graphs: removenoops.remove_same_as(graph) - + if heuristic is not None: kwargs = {"heuristic": heuristic} else: @@ -192,7 +188,7 @@ result = eval_func([1]) assert result == 1 result = eval_func([2]) - assert result == 2 + assert result == 2 def test_inline_several_times(self): def f(x): @@ -587,9 +583,6 @@ eval_func([-66]) eval_func([282]) - -class TestInlineLLType(LLRtypeMixin, BaseTestInline): - def test_correct_keepalive_placement(self): def h(x): if not x: @@ -638,158 +631,3 @@ assert len(collect_called_graphs(f_graph, t)) == 1 auto_inline_graphs(t, [f_graph], 32, inline_graph_from_anywhere=True) assert len(collect_called_graphs(f_graph, t)) == 0 - - -class TestInlineOOType(OORtypeMixin, BaseTestInline): - - def test_rtype_r_dict_exceptions(self): - from rpython.rlib.objectmodel import r_dict - def raising_hash(obj): - if obj.startswith("bla"): - raise TypeError - return 1 - def eq(obj1, obj2): - return obj1 is obj2 - def f(): - d1 = r_dict(eq, raising_hash) - d1['xxx'] = 1 - try: - x = d1["blabla"] - except Exception: - return 42 - return x - - eval_func, t = self.check_auto_inlining(f, []) - res = eval_func([]) - assert res == 42 - - def test_float(self): - ex = ['', ' '] - def fn(i): - s = ex[i] - try: - return float(s) - except ValueError: - return -999.0 - - eval_func, t = self.check_auto_inlining(fn, [int]) - expected = fn(0) - res = eval_func([0]) - assert res == expected - - def test_oosend(self): - class A: - def foo(self, x): - return x - def fn(x): - a = A() - return a.foo(x) - - eval_func, t = self.check_auto_inlining(fn, [int], checkvirtual=True) - expected = fn(42) - res = eval_func([42]) - assert res == expected - - def test_not_inline_oosend(self): - class A: - def foo(self, x): - return x - class B(A): - def foo(self, x): - return x+1 - - def fn(flag, x): - if flag: - obj = A() - else: - obj = B() - return obj.foo(x) - - eval_func, t = self.check_auto_inlining(fn, [bool, int], checkvirtual=True) - expected = fn(True, 42) - res = eval_func([True, 42]) - assert res == expected - - def test_oosend_inherited(self): - class BaseStringFormatter: - def __init__(self): - self.fmtpos = 0 - def forward(self): - self.fmtpos += 1 - - class StringFormatter(BaseStringFormatter): - def __init__(self, fmt): - BaseStringFormatter.__init__(self) - self.fmt = fmt - def peekchr(self): - return self.fmt[self.fmtpos] - def peel_num(self): - while True: - self.forward() - c = self.peekchr() - if self.fmtpos == 2: break - return 0 - - class UnicodeStringFormatter(BaseStringFormatter): - pass - - def fn(x): - if x: - fmt = StringFormatter('foo') - return fmt.peel_num() - else: - dummy = UnicodeStringFormatter() - dummy.forward() - return 0 - - eval_func, t = self.check_auto_inlining(fn, [int], checkvirtual=True, - remove_same_as=True) - expected = fn(1) - res = eval_func([1]) - assert res == expected - - def test_classattr(self): - class A: - attr = 666 - class B(A): - attr = 42 - def fn5(): - b = B() - return b.attr - - eval_func, t = self.check_auto_inlining(fn5, [], checkvirtual=True) - res = eval_func([]) - assert res == 42 - - def test_indirect_call_becomes_direct(self): - def h1(n): - return n+1 - def h2(n): - return n+2 - def g(myfunc, n): - return myfunc(n*5) - def f(x, y): - return g(h1, x) + g(h2, y) - eval_func = self.check_inline(g, f, [int, int]) 
- res = eval_func([10, 173]) - assert res == f(10, 173) - - def test_cannot_inline_1(self): - from rpython.rtyper.lltypesystem import lltype, rffi - for attr in [None, 'try', True]: - def h1(n): - return lltype.malloc(rffi.INTP.TO, 1, flavor='raw') - if attr is not None: - h1._always_inline_ = attr - def f(x): - try: - return h1(x) - except Exception: - return lltype.nullptr(rffi.INTP.TO) - # - def compile(): - self.check_auto_inlining(f, [int]) - if attr is True: - py.test.raises(CannotInline, compile) - else: - compile() # assert does not raise diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py --- a/rpython/translator/backendopt/test/test_malloc.py +++ b/rpython/translator/backendopt/test/test_malloc.py @@ -1,22 +1,17 @@ import py -from rpython.translator.backendopt.malloc import LLTypeMallocRemover, OOTypeMallocRemover +from rpython.translator.backendopt.malloc import LLTypeMallocRemover from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.translator import TranslationContext, graphof from rpython.translator import simplify from rpython.flowspace.model import checkgraph, Block, mkentrymap from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.rlib import objectmodel from rpython.conftest import option -class BaseMallocRemovalTest(object): - type_system = None - MallocRemover = None - - def _skip_oo(self, msg): - if self.type_system == 'ootype': - py.test.skip(msg) +class TestMallocRemoval(object): + type_system = 'lltype' + MallocRemover = LLTypeMallocRemover def check_malloc_removed(cls, graph): remover = cls.MallocRemover() @@ -39,7 +34,7 @@ remover = self.MallocRemover() t = TranslationContext() t.buildannotator().build_types(fn, signature) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper(type_system='lltype').specialize() graph = graphof(t, fn) if inline is not None: from rpython.translator.backendopt.inline import auto_inline_graphs @@ -154,10 +149,6 @@ -class TestLLTypeMallocRemoval(BaseMallocRemovalTest): - type_system = 'lltype' - MallocRemover = LLTypeMallocRemover - def test_dont_remove_with__del__(self): import os delcalls = [0] @@ -349,35 +340,3 @@ u[0].s.x = x return u[0].s.x graph = self.check(f, [int], [42], 42) - - -class TestOOTypeMallocRemoval(BaseMallocRemovalTest): - type_system = 'ootype' - MallocRemover = OOTypeMallocRemover - - def test_oononnull(self): - FOO = ootype.Instance('Foo', ootype.ROOT) - def fn(): - s = ootype.new(FOO) - return bool(s) - self.check(fn, [], [], True) - - def test_classattr_as_defaults(self): - class Bar: - foo = 41 - - def fn(): - x = Bar() - x.foo += 1 - return x.foo - self.check(fn, [], [], 42) - - def test_fn5(self): - # don't test this in ootype because the class attribute access - # is turned into an oosend which prevents malloc removal to - # work unless we inline first. 
See test_classattr in - # test_inline.py - py.test.skip("oosend prevents malloc removal") - - def test_bogus_cast_pointer(self): - py.test.skip("oosend prevents malloc removal") diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -1,6 +1,5 @@ import py from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype from rpython.translator.translator import TranslationContext, graphof from rpython.translator.simplify import get_funcobj from rpython.translator.backendopt.writeanalyze import WriteAnalyzer, top_set @@ -11,9 +10,9 @@ class BaseTest(object): - type_system = None + type_system = 'lltype' Analyzer = WriteAnalyzer - + def translate(self, func, sig): t = TranslationContext() t.buildannotator().build_types(func, sig) @@ -23,7 +22,7 @@ return t, self.Analyzer(t) -class BaseTestWriteAnalyze(BaseTest): +class TestWriteAnalyze(BaseTest): def test_writes_simple(self): def g(x): @@ -149,7 +148,7 @@ obj = B() f(obj) m(obj) - + t, wa = self.translate(h, [int]) hgraph = graphof(t, h) # fiiiish :-( @@ -176,7 +175,7 @@ # an indirect call without a list of graphs from rpython.rlib.objectmodel import instantiate class A: - pass + pass class B(A): pass def g(x): @@ -190,10 +189,7 @@ t, wa = self.translate(f, [int]) fgraph = graphof(t, f) result = wa.analyze(fgraph.startblock.operations[0]) - if self.type_system == 'lltype': - assert result is top_set - else: - assert not result # ootype is more precise in this case + assert result is top_set def test_llexternal(self): from rpython.rtyper.lltypesystem.rffi import llexternal @@ -224,10 +220,6 @@ result = wa.analyze(ggraph.startblock.operations[-1]) assert not result - -class TestLLtype(BaseTestWriteAnalyze): - type_system = 'lltype' - def test_list(self): def g(x, y, z): return f(x, y, z) @@ -284,46 +276,8 @@ assert name.endswith("foobar") -class TestOOtype(BaseTestWriteAnalyze): - type_system = 'ootype' - - def test_array(self): - def g(x, y, z): - return f(x, y, z) - def f(x, y, z): - l = [0] * x - l[1] = 42 - return len(l) + z - - t, wa = self.translate(g, [int, int, int]) - ggraph = graphof(t, g) - assert ggraph.startblock.operations[0].opname == 'direct_call' - - result = sorted(wa.analyze(ggraph.startblock.operations[0])) - assert len(result) == 1 - array, A = result[0] - assert array == 'array' - assert A.ITEM is ootype.Signed - - def test_list(self): - def g(x, y, z): - return f(x, y, z) - def f(x, y, z): - l = [0] * x - l.append(z) - return len(l) + z - - t, wa = self.translate(g, [int, int, int]) - ggraph = graphof(t, g) - assert ggraph.startblock.operations[0].opname == 'direct_call' - - result = wa.analyze(ggraph.startblock.operations[0]) - assert result is top_set - - class TestLLtypeReadWriteAnalyze(BaseTest): Analyzer = ReadWriteAnalyzer - type_system = 'lltype' def test_read_simple(self): def g(x): @@ -346,7 +300,7 @@ def h(flag): obj = A(flag) return obj.f() - + t, wa = self.translate(h, [int]) hgraph = graphof(t, h) op_call_f = hgraph.startblock.operations[-1] From noreply at buildbot.pypy.org Sun Jul 7 14:22:32 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 14:22:32 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove rpython/translator/oosupport Message-ID: <20130707122232.38DE41C0512@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: 
r65255:142b22c6157a Date: 2013-07-07 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/142b22c6157a/ Log: Remove rpython/translator/oosupport diff too long, truncating to 2000 out of 3550 lines diff --git a/rpython/translator/oosupport/__init__.py b/rpython/translator/oosupport/__init__.py deleted file mode 100644 --- a/rpython/translator/oosupport/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ - -""" -This module contains code and tests that can be shared between -the various ootypesystem based backends. -""" diff --git a/rpython/translator/oosupport/constant.py b/rpython/translator/oosupport/constant.py deleted file mode 100644 --- a/rpython/translator/oosupport/constant.py +++ /dev/null @@ -1,779 +0,0 @@ -""" -___________________________________________________________________________ -Constants - -Complex code for representing constants. For each complex constant, -we create an object and record it in the database. These objects -contain the knowledge about how to access the value of the constant, -as well as the how to initialize it. The constants are initialized in -two phases so that interdependencies do not prevent a problem. - -The initialization process works in two phases: - -1. create_pointer(): this creates uninitialized pointers, so that - circular references can be handled. - -2. initialize_data(): initializes everything else. The constants are - first sorted by PRIORITY so that CustomDicts are initialized last. - -These two methods will be invoked by the ConstantGenerator's gen_constants() -routine. - -A backend will typically create its own version of each kind of Const class, -adding at minimum a push() and store() method. A custom variant of -BaseConstantGenerator is also needed. These classes can also be chosen -by the genoo.py subclass of the backend -""" - -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.lltypesystem import rffi -import operator - -MAX_CONST_PER_STEP = 40 - -PRIMITIVE_TYPES = set([ootype.Void, ootype.Bool, ootype.Char, ootype.UniChar, - ootype.Float, ootype.Signed, ootype.Unsigned, - ootype.String, ootype.Unicode, ootype.SignedLongLong, - ootype.UnsignedLongLong, rffi.SHORT]) - -def is_primitive(TYPE): - return TYPE in PRIMITIVE_TYPES - -def get_primitive_constant(TYPE, value): - if is_primitive(TYPE): - return TYPE, value - if TYPE is ootype.Object: - obj = value.obj - T2 = ootype.typeOf(obj) - if obj is not None and is_primitive(T2): - return T2, obj - return None, None - -def push_constant(db, TYPE, value, gen): - """ General method that pushes the value of the specified constant - onto the stack. Use this when you want to load a constant value. - May or may not create an abstract constant object. - - db --- a Database - TYPE --- the ootype of the constant - value --- the ootype instance (ootype._list, int, etc) - gen --- a metavm.Generator - """ - - constgen = db.constant_generator - - TYPE2, value2 = get_primitive_constant(TYPE, value) - if TYPE2 is not None: - return constgen.push_primitive_constant(gen, TYPE2, value2) - - const = constgen.record_const(value) - if const.is_inline(): - const.push_inline(gen, TYPE) - else: - constgen.push_constant(gen, const) - if TYPE is not const.OOTYPE(): - constgen.downcast_constant(gen, value, TYPE) - -# ______________________________________________________________________ -# Constant generator -# -# The back-end can specify which constant generator to use by setting -# genoo.ConstantGenerator to the appropriate class. 
The -# ConstantGenerator handles invoking the constant's initialization -# routines, as well as loading and storing them. -# -# For the most part, no code needs to interact with the constant -# generator --- the rest of the code base should always invoke -# push_constant(), which may delegate to the constant generator if -# needed. - -class BaseConstantGenerator(object): - - def __init__(self, db): - self.db = db - self.genoo = db.genoo - self.cache = {} - - # _________________________________________________________________ - # Constant Operations - # - # Methods for loading and storing the value of constants. Clearly, - # storing the value of a constant is only done internally. These - # must be overloaded by the specific backend. Note that there - # are some more specific variants below that do not have to be overloaded - # but may be. - - def push_constant(self, gen, const): - """ - gen --- a generator - const --- an AbstractConst object - - Loads the constant onto the stack. Can be invoked at any time. - """ - raise NotImplementedError - - def _store_constant(self, gen, const): - """ - gen --- a generator - const --- an AbstractConst object - - stores the constant from the stack - """ - raise NotImplementedError - - # _________________________________________________________________ - # Optional Constant Operations - # - # These allow various hooks at convenient times. All of them are - # already implemented and you don't need to overload them. - - def push_primitive_constant(self, gen, TYPE, value): - """ Invoked when an attempt is made to push a primitive - constant. Normally just passes the call onto the code - generator. """ - gen.push_primitive_constant(TYPE, value) - - def downcast_constant(self, gen, const, EXPECTED_TYPE): - """ Invoked when the expected type of a const does not match - const.OOTYPE(). The constant has been pushed. Normally just - invokes gen.downcast. When it finishes, constant should still - be on the stack. """ - gen.downcast(EXPECTED_TYPE) - - def _init_constant(self, const): - """ - const --- a freshly created AbstractConst object - - Gives the generator a chance to set any fields it wants on the - constant just after the object is first created. Not invoked - while generating constant initialization code, but before. - """ - pass - - def _push_constant_during_init(self, gen, const): - """ - gen --- a generator - const --- an AbstractConst object - - Just like push_constant, but only invoked during - initialization. By default simply invokes push_constant(). - """ - return self.push_constant(gen, const) - - def _pre_store_constant(self, gen, const): - """ - gen --- a generator - const --- an AbstractConst object - - invoked before the constant's create_pointer() routine is - called, to prepare the stack in any way needed. Typically - does nothing, but sometimes pushes the 'this' pointer if the - constant will be stored in the field of a singleton object. - """ - pass - - def _get_key_for_const(self, value): - return value - - # _________________________________________________________________ - # Constant Object Creation - # - # Code that deals with creating AbstractConst objects and recording - # them. You should not need to change anything here. - - def record_const(self, value): - """ Returns an object representing the constant, remembering - also any details needed to initialize the constant. value - should be an ootype constant value. Not generally called - directly, but it can be if desired. 
""" - assert not is_primitive(value) - if isinstance(value, ootype._object) and value: # leave ootype.NULL as is - value = value.obj - self.db.cts.lltype_to_cts(value._TYPE) # record const - if value in self.cache: - return self.cache[value] - const = self._create_complex_const(value) - key = self._get_key_for_const(value) - self.cache[key] = const - self._init_constant(const) - const.record_dependencies() - return const - - def _create_complex_const(self, value): - - """ A helper method which creates a Constant wrapper object for - the given value. Uses the types defined in the sub-class. """ - - # Determine if the static type differs from the dynamic type. - if isinstance(value, ootype._view): - static_type = value._TYPE - value = value._inst - else: - static_type = None - - # Find the appropriate kind of Const object. - genoo = self.genoo - uniq = self.db.unique() - if isinstance(value, ootype._instance): - return genoo.InstanceConst(self.db, value, static_type, uniq) - elif isinstance(value, ootype._record): - return genoo.RecordConst(self.db, value, uniq) - elif isinstance(value, ootype._class): - return genoo.ClassConst(self.db, value, uniq) - elif isinstance(value, ootype._list): - return genoo.ListConst(self.db, value, uniq) - elif isinstance(value, ootype._array): - return genoo.ArrayConst(self.db, value, uniq) - elif isinstance(value, ootype._static_meth): - return genoo.StaticMethodConst(self.db, value, uniq) - elif isinstance(value, ootype._custom_dict): - return genoo.CustomDictConst(self.db, value, uniq) - elif isinstance(value, ootype._dict): - return genoo.DictConst(self.db, value, uniq) - elif isinstance(value, ootype._weak_reference): - return genoo.WeakRefConst(self.db, value, uniq) - elif value is ootype.null(value._TYPE): - # for NULL values, we can just use "NULL" const. This is - # a fallback since we sometimes have constants of - # unhandled types which are equal to NULL. - return genoo.NullConst(self.db, value, uniq) - else: - assert False, 'Unknown constant: %s' % value - - # _________________________________________________________________ - # Constant Generation - # - # You will not generally need to overload any of the functions - # in this section. - - def gen_constants(self, ilasm): - - # Sort constants by priority. Don't bother with inline - # constants. - all_constants = [c for c in self.cache.values() if not c.is_inline()] - all_constants.sort(key=lambda c: (c.PRIORITY, c.count)) - - # Counters to track how many steps we have emitted so far, etc. - # See _consider_step() for more information. - self._step_counter = 0 - self._all_counter = 0 - - # Now, emit the initialization code: - gen = self._begin_gen_constants(ilasm, all_constants) - for const in all_constants: - self._declare_const(gen, const) - self._create_pointers(gen, all_constants) - self._initialize_data(gen, all_constants) - self._end_step(gen) - self._end_gen_constants(gen, self._step_counter) - - def _create_pointers(self, gen, all_constants): - """ Iterates through each constant, creating the pointer for it - and storing it. """ - gen.add_section("Create Pointer Phase") - for const in all_constants: - gen.add_comment("Constant: %s" % const.name) - self._pre_store_constant(gen, const) - self._consider_step(gen) - const.create_pointer(gen) - self._store_constant(gen, const) - - def _initialize_data(self, gen, all_constants): - """ Iterates through each constant, initializing its data. 
""" - gen.add_section("Initialize Data Phase") - for const in all_constants: - self._consider_step(gen) - gen.add_comment("Constant: %s" % const.name) - self._push_constant_during_init(gen, const) - self.current_const = const - if not const.initialize_data(self, gen): - gen.pop(const.OOTYPE()) - - def _consider_step(self, gen): - """ Considers whether to start a new step at this point. We - start a new step every so often to ensure the initialization - functions don't get too large and upset mono or the JVM or - what have you. """ - if self._all_counter % MAX_CONST_PER_STEP == 0: - self._new_step(gen) - self._all_counter += 1 - - def _consider_split_current_function(self, gen): - """ - Called during constant initialization; if the backend thinks - the current function is too large, it can close it and open a - new one, pushing again the constant on the stack. The default - implementatio does nothing. - """ - pass - - def _new_step(self, gen): - self._end_step(gen) - self._declare_step(gen, self._step_counter) # open the next step - - def _end_step(self, gen): - """ Ends the current step if one has begun. """ - if self._all_counter != 0: - self._close_step(gen, self._step_counter) # close previous step - self._step_counter += 1 - - # _________________________________________________________________ - # Abstract functions you must overload - - def _begin_gen_constants(self, ilasm, all_constants): - """ Invoked with the assembler and sorted list of constants - before anything else. Expected to return a generator that will - be passed around after that (the parameter named 'gen'). """ - raise NotImplementedError - - def _declare_const(self, gen, const): - """ Invoked once for each constant before any steps are created. """ - raise NotImplementedError - - def _declare_step(self, gen, stepnum): - """ Invoked to begin step #stepnum. stepnum starts with 0 (!) - and proceeds monotonically. If _declare_step() is invoked, - there will always be a corresponding call to _close_step(). """ - raise NotImplementedError - - def _close_step(self, gen, stepnum): - """ Invoked to end step #stepnum. Never invoked without a - corresponding call from _declare_step() first. """ - raise NotImplementedError - - def _end_gen_constants(self, gen, numsteps): - """ Invoked as the very last thing. numsteps is the total number - of steps that were created. """ - raise NotImplementedError - -# ______________________________________________________________________ -# Constant base class - -class AbstractConst(object): - PRIORITY = 0 - - def __init__(self, db, value, count): - self.db = db - self.cts = db.genoo.TypeSystem(db) - self.value = value - self.count = count - - # ____________________________________________________________ - # Hashing, equality comparison, and repr() - # - # Overloaded so that two AbstactConst objects representing - # the same OOValue are equal. 
Provide a sensible repr() - - def __hash__(self): - return hash(self.value) - - def __eq__(self, other): - return self.value == other.value - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return '' % (self.name, self.value) - - # ____________________________________________________________ - # Simple query routines - - def OOTYPE(self): - return self.value._TYPE - - def get_name(self): - pass - - def is_null(self): - return self.value is ootype.null(self.value._TYPE) - - def is_inline(self): - """ - Inline constants are not stored as static fields in the - Constant class, but they are newly created on the stack every - time they are used. Classes overriding is_inline should - override push_inline too. By default only NULL constants are - inlined. - """ - return self.is_null() - - def push_inline(self, gen, EXPECTED_TYPE): - """ - Invoked by push_constant() when is_inline() returns true. - By default, just pushes NULL as only NULL constants are inlined. - If you overload this, overload is_inline() too. - """ - assert self.is_inline() and self.is_null() - return gen.push_null(EXPECTED_TYPE) - - # ____________________________________________________________ - # Initializing the constant - - def record_dependencies(self): - """ - Ensures that all dependent objects are added to the database, - and any classes that are used are loaded. Called when the - constant object is created. - """ - raise NotImplementedError - - def create_pointer(self, gen): - """ - Creates the pointer representing this object, but does not - initialize its fields. First phase of initialization. - """ - assert not self.is_null() - gen.new(self.value._TYPE) - - def initialize_data(self, constgen, gen): - """ - Initializes the internal data. Begins with a pointer to - the constant on the stack. Normally returns something - false (like, say, None) --- but returns True if it consumes - the pointer from the stack in the process; otherwise, a pop - is automatically inserted afterwards. - """ - raise NotImplementedError - - # ____________________________________________________________ - # Internal helpers - - def _record_const_if_complex(self, TYPE, value): - TYPE2, value2 = get_primitive_constant(TYPE, value) - if not TYPE2: - self.db.constant_generator.record_const(value) - - -# ______________________________________________________________________ -# Null Values -# -# NULL constants of types for which we have no better class use this -# class. For example, dict item iterators and the like. 
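# Editor's note: create_pointer()/initialize_data() above split every constant
# into an "allocate first, fill in later" pair precisely so that constants which
# refer to each other can still be built.  The sketch below shows that two-phase
# idea on plain Python objects (names are invented for illustration):

class NodeSketch(object):
    def __init__(self):
        self.other = None       # filled in during the second phase

def build_constants(references):
    # phase 1 (create_pointer): allocate an empty object for every constant
    objects = dict((name, NodeSketch()) for name in references)
    # phase 2 (initialize_data): fields may now refer to any other constant
    for name, other_name in references.items():
        objects[name].other = objects[other_name]
    return objects

objs = build_constants({"a": "b", "b": "a"})     # two mutually-referring constants
assert objs["a"].other is objs["b"] and objs["b"].other is objs["a"]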
- -class NullConst(AbstractConst): - def __init__(self, db, value, count): - AbstractConst.__init__(self, db, value, count) - self.name = 'NULL__%d' % count - assert self.is_null() and self.is_inline() - - def record_dependencies(self): - return - -# ______________________________________________________________________ -# Records - -class RecordConst(AbstractConst): - def __init__(self, db, record, count): - AbstractConst.__init__(self, db, record, count) - self.name = 'RECORD__%d' % count - - def record_dependencies(self): - if self.value is ootype.null(self.value._TYPE): - return - for f_name, (FIELD_TYPE, f_default) in self.value._TYPE._fields.iteritems(): - value = self.value._items[f_name] - self._record_const_if_complex(FIELD_TYPE, value) - - def initialize_data(self, constgen, gen): - assert not self.is_null() - SELFTYPE = self.value._TYPE - for f_name, (FIELD_TYPE, f_default) in self.value._TYPE._fields.iteritems(): - if FIELD_TYPE is not ootype.Void: - gen.dup(SELFTYPE) - value = self.value._items[f_name] - push_constant(self.db, FIELD_TYPE, value, gen) - gen.set_field(SELFTYPE, f_name) - -# ______________________________________________________________________ -# Instances - -class InstanceConst(AbstractConst): - def __init__(self, db, obj, static_type, count): - AbstractConst.__init__(self, db, obj, count) - if static_type is None: - self.static_type = obj._TYPE - else: - self.static_type = static_type - db.genoo.TypeSystem(db).lltype_to_cts( - obj._TYPE) # force scheduling of obj's class - class_name = db.class_name(obj._TYPE).replace('.', '_') - self.name = '%s__%d' % (class_name, count) - - def record_dependencies(self): - if not self.value: - return - - INSTANCE = self.value._TYPE - while INSTANCE is not None: - for name, (TYPE, default) in INSTANCE._fields.iteritems(): - if TYPE is ootype.Void: - continue - type_ = self.cts.lltype_to_cts(TYPE) # record type - value = getattr(self.value, name) # record value - self._record_const_if_complex(TYPE, value) - INSTANCE = INSTANCE._superclass - - def is_null(self): - return not self.value - - def _sorted_const_list(self): - # XXX, horrible hack: first collect all consts, then render - # CustomDicts at last because their ll_set could need other - # fields already initialized. We should really think a more - # general way to handle such things. - const_list = [] - INSTANCE = self.value._TYPE - while INSTANCE is not None: - for name, (TYPE, default) in INSTANCE._fields.iteritems(): - if TYPE is ootype.Void: - continue - value = getattr(self.value, name) - const_list.append((TYPE, INSTANCE, name, value)) - INSTANCE = INSTANCE._superclass - - def mycmp(x, y): - if isinstance(x[0], ootype.CustomDict) and not isinstance(y[0], ootype.CustomDict): - return 1 # a CustomDict is always greater than non-CustomDicts - elif isinstance(y[0], ootype.CustomDict) and not isinstance(x[0], ootype.CustomDict): - return -1 # a non-CustomDict is always less than CustomDicts - else: - return cmp(x, y) - const_list.sort(mycmp) - - return const_list - - def initialize_data(self, constgen, gen): - assert not self.is_null() - - # Get a list of all the constants we'll need to initialize. - # I am not clear on why this needs to be sorted, actually, - # but we sort it. 
- const_list = self._sorted_const_list() - - # Push ourself on the stack, and cast to our actual type if it - # is not the same as our static type - SELFTYPE = self.value._TYPE - if SELFTYPE is not self.static_type: - gen.downcast(SELFTYPE) - - # Store each of our fields in the sorted order - for FIELD_TYPE, INSTANCE, name, value in const_list: - constgen._consider_split_current_function(gen) - gen.dup(SELFTYPE) - push_constant(self.db, FIELD_TYPE, value, gen) - gen.set_field(INSTANCE, name) - -# ______________________________________________________________________ -# Class constants - -class ClassConst(AbstractConst): - def __init__(self, db, class_, count): - AbstractConst.__init__(self, db, class_, count) - self.name = 'CLASS__%d' % count - - def record_dependencies(self): - INSTANCE = self.value._INSTANCE - if INSTANCE is not None: - self.cts.lltype_to_cts(INSTANCE) # force scheduling class generation - - def is_null(self): - return self.value._INSTANCE is None - - def create_pointer(self, gen): - assert not self.is_null() - INSTANCE = self.value._INSTANCE - gen.getclassobject(INSTANCE) - - def initialize_data(self, constgen, gen): - pass - -# ______________________________________________________________________ -# List constants - -class ListConst(AbstractConst): - def __init__(self, db, list, count): - AbstractConst.__init__(self, db, list, count) - self.name = 'LIST__%d' % count - - def record_dependencies(self): - if not self.value: - return - for item in self.value._list: - self._record_const_if_complex(self.value._TYPE.ITEM, item) - - def create_pointer(self, gen): - assert not self.is_null() - SELFTYPE = self.value._TYPE - - # XXX --- should we add something special to the generator for - # this? I want it to look exactly like it would in normal - # opcodes...but of course under current system I can't know - # what normal opcodes would look like as they fall under the - # perview of each backend rather than oosupport - - # Create the list - gen.new(SELFTYPE) - - # And then resize it to the correct size - gen.dup(SELFTYPE) - push_constant(self.db, ootype.Signed, len(self.value._list), gen) - gen.call_method(SELFTYPE, '_ll_resize') - - def _do_not_initialize(self): - """ Returns True if the list should not be initialized; this - can be overloaded by the backend if your conditions are wider. - The default is not to initialize if the list is a list of - Void. 
""" - return self.value._TYPE.ITEM is ootype.Void - - def initialize_data(self, constgen, gen): - assert not self.is_null() - SELFTYPE = self.value._TYPE - ITEM = self.value._TYPE.ITEM - - # check for special cases and avoid initialization - if self._do_not_initialize(): - return - - # set each item in the list using the OOTYPE methods - for idx, item in enumerate(self.value._list): - constgen._consider_split_current_function(gen) - gen.dup(SELFTYPE) - push_constant(self.db, ootype.Signed, idx, gen) - push_constant(self.db, ITEM, item, gen) - gen.prepare_generic_argument(ITEM) - gen.call_method(SELFTYPE, 'll_setitem_fast') - -# ______________________________________________________________________ -# Array constants - -class ArrayConst(AbstractConst): - def __init__(self, db, list, count): - AbstractConst.__init__(self, db, list, count) - self.name = 'ARRAY__%d' % count - - def record_dependencies(self): - if not self.value: - return - for item in self.value._array: - self._record_const_if_complex(self.value._TYPE.ITEM, item) - - def create_pointer(self, gen): - from rpython.flowspace.model import Constant - assert not self.is_null() - SELFTYPE = self.value._TYPE - - # Create the array - length = Constant(len(self.value._array), ootype.Signed) - gen.oonewarray(SELFTYPE, length) - - def _do_not_initialize(self): - """ Returns True if the array should not be initialized; this - can be overloaded by the backend if your conditions are wider. - The default is not to initialize if the array is a array of - Void. """ - return self.value._TYPE.ITEM is ootype.Void - - def initialize_data(self, constgen, gen): - assert not self.is_null() - SELFTYPE = self.value._TYPE - ITEM = self.value._TYPE.ITEM - - # check for special cases and avoid initialization - if self._do_not_initialize(): - return - - # set each item in the list using the OOTYPE methods - for idx, item in enumerate(self.value._array): - constgen._consider_split_current_function(gen) - gen.dup(SELFTYPE) - push_constant(self.db, ootype.Signed, idx, gen) - push_constant(self.db, ITEM, item, gen) - self._setitem(SELFTYPE, gen) - - def _setitem(self, SELFTYPE, gen): - gen.call_method(SELFTYPE, 'll_setitem_fast') - -# ______________________________________________________________________ -# Dictionary constants - -class DictConst(AbstractConst): - PRIORITY = 90 - - def __init__(self, db, dict, count): - AbstractConst.__init__(self, db, dict, count) - self.name = 'DICT__%d' % count - - def record_dependencies(self): - if not self.value: - return - - for key, value in self.value._dict.iteritems(): - self._record_const_if_complex(self.value._TYPE._KEYTYPE, key) - self._record_const_if_complex(self.value._TYPE._VALUETYPE, value) - - def initialize_data(self, constgen, gen): - assert not self.is_null() - SELFTYPE = self.value._TYPE - KEYTYPE = self.value._TYPE._KEYTYPE - VALUETYPE = self.value._TYPE._VALUETYPE - - gen.add_comment('Initializing dictionary constant') - - if KEYTYPE is ootype.Void and VALUETYPE is ootype.Void: - return - - for key, value in self.value._dict.iteritems(): - constgen._consider_split_current_function(gen) - gen.dup(SELFTYPE) - gen.add_comment(' key=%r value=%r' % (key,value)) - push_constant(self.db, KEYTYPE, key, gen) - gen.prepare_generic_argument(KEYTYPE) - push_constant(self.db, VALUETYPE, value, gen) - gen.prepare_generic_argument(VALUETYPE) - gen.call_method(SELFTYPE, 'll_set') - -class CustomDictConst(DictConst): - PRIORITY = 100 - -# ______________________________________________________________________ -# Static 
method constants - -class StaticMethodConst(AbstractConst): - def __init__(self, db, sm, count): - AbstractConst.__init__(self, db, sm, count) - self.name = 'DELEGATE__%d' % count - - def record_dependencies(self): - if self.value is ootype.null(self.value._TYPE): - return - if hasattr(self.value, 'graph'): - self.db.pending_function(self.value.graph) - self.delegate_type = self.db.record_delegate(self.value._TYPE) - - def initialize_data(self, constgen, gen): - raise NotImplementedError - -# ______________________________________________________________________ -# Weak Reference constants - -class WeakRefConst(AbstractConst): - def __init__(self, db, wref, count): - if wref: - value = wref.ll_deref() - else: - value = None - AbstractConst.__init__(self, db, value, count) - self.name = 'WEAKREF__%d' % count - - def OOTYPE(self): - # Not sure what goes here...? - return None - - def is_null(self): - return self.value is None - - def record_dependencies(self): - if self.value is not None: - self.db.constant_generator.record_const(self.value) diff --git a/rpython/translator/oosupport/database.py b/rpython/translator/oosupport/database.py deleted file mode 100644 --- a/rpython/translator/oosupport/database.py +++ /dev/null @@ -1,77 +0,0 @@ -from rpython.translator.oosupport.constant import is_primitive -from rpython.rtyper.ootypesystem import ootype - -class Database(object): - - def __init__(self, genoo): - self.genoo = genoo - self.cts = genoo.TypeSystem(self) - self._pending_nodes = set() - self._rendered_nodes = set() - self._unique_counter = 0 - self.constant_generator = genoo.ConstantGenerator(self) - self.locked = False # new pending nodes are not allowed here - - # ____________________________________________________________ - # Miscellaneous - - def unique(self): - """ Every time it is called, returns a unique integer. Used in - various places. """ - self._unique_counter+=1 - return self._unique_counter-1 - - def class_name(self, OOINSTANCE): - """ Returns the backend class name of the type corresponding - to OOINSTANCE""" - raise NotImplementedError - - # ____________________________________________________________ - # Generation phases - - def gen_constants(self, ilasm): - """ Renders the constants uncovered during the graph walk""" - self.locked = True # new pending nodes are not allowed here - self.constant_generator.gen_constants(ilasm) - self.locked = False - - # ____________________________________________________________ - # Generation phases - - def record_delegate(self, OOTYPE): - """ Returns a backend-specific type for a delegate class... - details currently undefined. """ - raise NotImplementedError - - # ____________________________________________________________ - # Node creation - # - # Creates nodes for various kinds of things. - - def pending_class(self, INSTANCE): - """ Returns a Node representing the ootype.Instance provided """ - raise NotImplementedError - - def pending_function(self, graph): - """ Returns a Node representing the graph, which is being used as - a static function """ - raise NotImplementedError - - # ____________________________________________________________ - # Basic Worklist Manipulation - - def pending_node(self, node): - """ Adds a node to the worklist, so long as it is not already there - and has not already been rendered. 
""" - assert not self.locked # sanity check - if node in self._pending_nodes or node in self._rendered_nodes: - return - self._pending_nodes.add(node) - node.dependencies() - - def len_pending(self): - return len(self._pending_nodes) - - def pop(self): - return self._pending_nodes.pop() - diff --git a/rpython/translator/oosupport/function.py b/rpython/translator/oosupport/function.py deleted file mode 100644 --- a/rpython/translator/oosupport/function.py +++ /dev/null @@ -1,441 +0,0 @@ -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("oosupport") -py.log.setconsumer("oosupport", ansi_log) - -from rpython.flowspace import model as flowmodel -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.oosupport.treebuilder import SubOperation -from rpython.translator.oosupport.metavm import InstructionList, StoreResult -from rpython.tool.identity_dict import identity_dict - - -def render_sub_op(sub_op, db, generator): - op = sub_op.op - instr_list = db.genoo.opcodes.get(op.opname, None) - assert instr_list is not None, 'Unknown opcode: %s ' % op - assert isinstance(instr_list, InstructionList) - assert instr_list[-1] is StoreResult, "Cannot inline an operation that doesn't store the result" - - # record that we know about the type of result and args - db.cts.lltype_to_cts(op.result.concretetype) - for v in op.args: - db.cts.lltype_to_cts(v.concretetype) - - instr_list = InstructionList(instr_list[:-1]) # leave the value on the stack if this is a sub-op - instr_list.render(generator, op) - # now the value is on the stack - -class Function(object): - - auto_propagate_exceptions = False - - def __init__(self, db, graph, name = None, is_method = False, is_entrypoint = False): - self.db = db - self.cts = db.genoo.TypeSystem(db) - self.graph = graph - self.name = self.cts.escape_name(name or graph.name) - self.is_method = is_method - self.is_entrypoint = is_entrypoint - self.generator = None # set in render() - self.label_counters = {} - - # If you want to enumerate args/locals before processing, then - # add these functions into your __init__() [they are defined below] - # self._set_args() - # self._set_locals() - - def current_label(self, prefix='label'): - current = self.label_counters.get(prefix, 0) - return '__%s_%d' % (prefix, current) - - def next_label(self, prefix='label'): - current = self.label_counters.get(prefix, 0) - self.label_counters[prefix] = current+1 - return self.current_label(prefix) - - def get_name(self): - return self.name - - def __repr__(self): - return '' % self.name - - def __hash__(self): - return hash(self.graph) - - def __eq__(self, other): - return self.graph == other.graph - - def __ne__(self, other): - return not self == other - - def _is_return_block(self, block): - return (not block.exits) and len(block.inputargs) == 1 - - def _is_raise_block(self, block): - return (not block.exits) and len(block.inputargs) == 2 - - def _is_exc_handling_block(self, block): - return block.exitswitch == flowmodel.c_last_exception - - def begin_render(self): - raise NotImplementedError - - def render_return_block(self, block): - raise NotImplementedError - - def render_raise_block(self, block): - raise NotImplementedError - - def begin_try(self): - """ Begins a try block; end_try will be called exactly once, then - some number of begin_ and end_catch pairs """ - raise NotImplementedError - - def end_try(self, target_label): - """ Ends the try block, and branchs to the given target_label if - no exception occurred """ - raise 
NotImplementedError - - def begin_catch(self, llexitcase): - """ Begins a catch block for the exception type specified in - llexitcase""" - raise NotImplementedError - - def end_catch(self, target_label): - """ Ends the catch block, and branchs to the given target_label as the - last item in the catch block """ - raise NotImplementedError - - def render(self, ilasm): - if self.db.graph_name(self.graph) is not None and not self.is_method: - return # already rendered - - self.ilasm = ilasm - self.generator = self._create_generator(self.ilasm) - graph = self.graph - self.begin_render() - - self.return_block = None - self.raise_block = None - for block in graph.iterblocks(): - if self._is_return_block(block): - self.return_block = block - elif self._is_raise_block(block): - self.raise_block = block - else: - self.set_label(self._get_block_name(block)) - if self._is_exc_handling_block(block): - self.render_exc_handling_block(block) - else: - self.render_normal_block(block) - - # render return blocks at the end just to please the .NET - # runtime that seems to need a return statement at the end of - # the function - - self.before_last_blocks() - - if self.raise_block: - self.set_label(self._get_block_name(self.raise_block)) - self.render_raise_block(self.raise_block) - if self.return_block: - self.set_label(self._get_block_name(self.return_block)) - self.render_return_block(self.return_block) - - self.end_render() - if not self.is_method: - self.db.record_function(self.graph, self.name) - - def before_last_blocks(self): - pass - - def render_exc_handling_block(self, block): - # renders all ops but the last one - for op in block.operations[:-1]: - self._render_op(op) - - anyHandler = False - for link in block.exits: - if link.exitcase is None: - continue - anyHandler = anyHandler or \ - not self._auto_propagate(link, block) - - # render the last one (if any!) and prepend a .try - if block.operations: - self.begin_try(anyHandler) - self._render_op(block.operations[-1]) - - # search for the "default" block to be executed when no - # exception is raised - for link in block.exits: - if link.exitcase is None: - self._setup_link(link) - self.end_try(self._get_block_name(link.target), anyHandler) - break - else: - assert False, "No non-exceptional case from exc_handling block" - - # give the backend a chance to see all the exceptions that might - # be caught here. 
For ex., JVM uses this to convert between - # built-in JVM exceptions to their RPython equivalents - if anyHandler: - self.introduce_exception_conversions( - [link.exitcase for link in block.exits if link.exitcase]) - - # catch the exception and dispatch to the appropriate block - for link in block.exits: - if link.exitcase is None: - continue # see above - assert issubclass(link.exitcase, py.builtin.BaseException) - if self._auto_propagate(link, block): - continue # let the exception propagate - ll_meta_exc = link.llexitcase - self.record_ll_meta_exc(ll_meta_exc) - self.begin_catch(link.llexitcase) - self.store_exception_and_link(link) - target_label = self._get_block_name(link.target) - self.end_catch(target_label) - - self.after_except_block() - - def _auto_propagate(self, link, block): - assert block.exitswitch is flowmodel.c_last_exception - if not self.auto_propagate_exceptions: - return False - if not self._is_raise_block(link.target): - return False - llexc = link.llexitcase - i = list(block.exits).index(link) - next_links = block.exits[i+1:] - for next_link in next_links: - # if one of the next links catches a superclass of llexc, we - # *have* to insert a catch block here, else the exception might be - # caught by the wrong one - if ootype.subclassof(llexc, next_link.llexitcase): - return False - - # if all the checks were ok, it's safe to avoid the catch block and - # let the exception propagate - return True - - def introduce_exception_conversions(self, llexitcases): - """ Called before any catch blocks are emitted with the full set of - exceptions that might be caught """ - return - - def after_except_block(self): - pass - - def record_ll_meta_exc(self, ll_meta_exc): - self.db.constant_generator.record_const(ll_meta_exc) - - def store_exception_and_link(self, link): - raise NotImplementedError - - def render_normal_block(self, block): - for op in block.operations: - self._render_op(op) - - if block.exitswitch is None: - assert len(block.exits) == 1 - link = block.exits[0] - target_label = self._get_block_name(link.target) - self._setup_link(link) - self.generator.branch_unconditionally(target_label) - elif block.exitswitch.concretetype is ootype.Bool: - self.render_bool_switch(block) - elif block.exitswitch.concretetype in (ootype.Signed, ootype.SignedLongLong, - ootype.Unsigned, ootype.UnsignedLongLong, - ootype.Char, ootype.UniChar): - self.render_numeric_switch(block) - else: - assert False, 'Unknonw exitswitch type: %s' % block.exitswitch.concretetype - - def render_bool_switch(self, block): - assert len(block.exits) == 2 - for link in block.exits: - if link.exitcase: - link_true = link - else: - link_false = link - - true_label = self.next_label('link_true') - self.generator.load(block.exitswitch) - self.generator.branch_conditionally(True, true_label) - self._follow_link(link_false) # if here, the exitswitch is false - self.set_label(true_label) - self._follow_link(link_true) # if here, the exitswitch is true - - def render_numeric_switch(self, block): - log.WARNING("The default version of render_numeric_switch is *slow*: please override it in the backend") - self.render_numeric_switch_naive(block) - - def _collect_switch_cases(self, block): - cases = {} - for link in block.exits: - if link.exitcase == "default": - default = link, self.next_label('switch') - else: - if block.exitswitch.concretetype in (ootype.Char, ootype.UniChar): - value = ord(link.exitcase) - else: - value = link.exitcase - cases[value] = link, self.next_label('switch') - - values = cases.keys() - 
try: - min_case = min(values) - max_case = max(values) - except ValueError: - min_case = max_case = 0 - return cases, min_case, max_case, default - - def _is_sparse_switch(self, cases, min_case, max_case): - if max_case-min_case > 3*len(cases) + 10: # the switch is very sparse, better to use the naive version - return True - return False - - def render_switch_case(self, link, label): - target_label = self._get_block_name(link.target) - self.set_label(label) - self._setup_link(link) - self.generator.branch_unconditionally(target_label) - - def render_numeric_switch_naive(self, block): - for link in block.exits: - target_label = self._get_block_name(link.target) - if link.exitcase == 'default': - self._setup_link(link) - self.generator.branch_unconditionally(target_label) - else: - next_case = self.next_label('next_case') - self.generator.push_primitive_constant(block.exitswitch.concretetype, link.exitcase) - self.generator.load(block.exitswitch) - self.generator.branch_if_not_equal(next_case) - self._setup_link(link) - self.generator.branch_unconditionally(target_label) - self.set_label(next_case) - - def _follow_link(self, link): - target_label = self._get_block_name(link.target) - self._setup_link(link) - self.generator.branch_unconditionally(target_label) - - def _dont_store(self, to_load, to_store): - return False - - def _setup_link(self, link): - target = link.target - linkvars = [] - for to_load, to_store in zip(link.args, target.inputargs): - if isinstance(to_load, flowmodel.Variable) and to_load.name == to_store.name: - continue - if to_load.concretetype is ootype.Void: - continue - if self._dont_store(to_load, to_store): - continue - linkvars.append((to_load, to_store)) - - # after SSI_to_SSA it can happen to have to_load = [a, b] and - # to_store = [b, c]. If we store each variable sequentially, - # 'b' would be overwritten before being read. To solve, we - # first load all the values on the stack, then store in the - # appropriate places. - - if self._trace_enabled(): - self._trace('link', writeline=True) - for to_load, to_store in linkvars: - self._trace_value('%s <-- %s' % (to_store, to_load), to_load) - self._trace('', writeline=True) - - for to_load, to_store in linkvars: - self.generator.load(to_load) - for to_load, to_store in reversed(linkvars): - self.generator.store(to_store) - - def _trace_enabled(self): - return False - - def _trace(self, s): - raise NotImplementedError - - def _trace_value(self, prompt, v): - raise NotImplementedError - - def _render_op(self, op): - instr_list = self.db.genoo.opcodes.get(op.opname, None) - assert instr_list is not None, 'Unknown opcode: %s ' % op - assert isinstance(instr_list, InstructionList) - - if self._trace_enabled(): - self._trace(str(op), writeline=True) - for i, arg in enumerate(op.args): - self._trace_value('Arg %02d' % i, arg) - - instr_list.render(self.generator, op) - - if self._trace_enabled(): - self._trace_value('Result', op.result) - - # ---------------------------------------------------------# - # These methods are quite backend independent, but not # - # used in all backends. Invoke them from your __init__ if # - # desired. 
# - # ---------------------------------------------------------# - - def _get_block_name(self, block): - # Note: this implementation requires that self._set_locals() be - # called to gather the blocknum's - return 'block%s' % self.blocknum[block] - - def _set_locals(self): - # this code is partly borrowed from - # rpython.translator.c.funcgen.FunctionCodeGenerator - # TODO: refactoring to avoid code duplication - - self.blocknum = {} - - graph = self.graph - mix = [graph.getreturnvar()] - for block in graph.iterblocks(): - self.blocknum[block] = len(self.blocknum) - mix.extend(block.inputargs) - - for op in block.operations: - mix.extend(op.args) - mix.append(op.result) - if getattr(op, "cleanup", None) is not None: - cleanup_finally, cleanup_except = op.cleanup - for cleanupop in cleanup_finally + cleanup_except: - mix.extend(cleanupop.args) - mix.append(cleanupop.result) - for link in block.exits: - mix.extend(link.getextravars()) - mix.extend(link.args) - - # filter only locals variables, i.e.: - # - must be variables - # - must appear only once - # - must not be function parameters - # - must not have 'void' type - - args = {} - for ctstype, name in self.args: - args[name] = True - - locals = [] - seen = identity_dict() - for v in mix: - is_var = isinstance(v, flowmodel.Variable) - if v not in seen and is_var and v.name not in args and v.concretetype is not ootype.Void: - locals.append(self.cts.llvar_to_cts(v)) - seen[v] = True - - self.locals = locals - - def _set_args(self): - args = [arg for arg in self.graph.getargs() if arg.concretetype is not ootype.Void] - self.args = map(self.cts.llvar_to_cts, args) - self.argset = set([argname for argtype, argname in self.args]) diff --git a/rpython/translator/oosupport/genoo.py b/rpython/translator/oosupport/genoo.py deleted file mode 100644 --- a/rpython/translator/oosupport/genoo.py +++ /dev/null @@ -1,104 +0,0 @@ -""" basic oogenerator -""" - -from py.builtin import set -from rpython.translator.oosupport import constant as ooconst -from rpython.translator.oosupport.treebuilder import build_trees -from rpython.translator.backendopt.ssa import SSI_to_SSA - -class GenOO(object): - TypeSystem = None - Function = None - Database = None - opcodes = None - log = None - - # Defines the subclasses used to represent complex constants by - # _create_complex_const: - - ConstantGenerator = None - NullConst = ooconst.NullConst - InstanceConst = ooconst.InstanceConst - RecordConst = ooconst.RecordConst - ClassConst = ooconst.ClassConst - ListConst = ooconst.ListConst - ArrayConst = ooconst.ArrayConst - StaticMethodConst = ooconst.StaticMethodConst - CustomDictConst = ooconst.CustomDictConst - DictConst = ooconst.DictConst - WeakRefConst = ooconst.WeakRefConst - - def __init__(self, tmpdir, translator, entrypoint, config=None, exctrans=False): - self.tmpdir = tmpdir - self.translator = translator - self.entrypoint = entrypoint - self.db = self.Database(self) - if config is None: - from rpython.config.translationoption import get_combined_translation_config - config = get_combined_translation_config(translating=True) - self.config = config - - # XXX: move this option out of the 'cli' section - exctrans = exctrans or translator.config.translation.cli.exception_transformer - if exctrans: - self.db.exceptiontransformer = translator.getexceptiontransformer() - - self.append_prebuilt_nodes() - - if exctrans: - etrafo = self.db.exceptiontransformer - for graph in translator.graphs: - etrafo.create_exception_handling(graph) - - if 
translator.config.translation.backendopt.stack_optimization: - self.stack_optimization() - - def stack_optimization(self): - for graph in self.translator.graphs: - SSI_to_SSA(graph) - build_trees(graph) - - def append_prebuilt_nodes(self): - pass - - def generate_source(self): - self.ilasm = self.create_assembler() - self.fix_names() - self.gen_entrypoint() - self.gen_pendings() - self.db.gen_constants(self.ilasm) - self.ilasm.close() - - def gen_entrypoint(self): - if self.entrypoint: - self.entrypoint.set_db(self.db) - self.db.pending_node(self.entrypoint) - else: - self.db.pending_function(self.translator.graphs[0]) - - def gen_pendings(self): - n = 0 - while self.db._pending_nodes: - node = self.db._pending_nodes.pop() - node.render(self.ilasm) - self.db._rendered_nodes.add(node) - n+=1 - if (n%100) == 0: - total = len(self.db._pending_nodes) + n - self.log.graphs('Rendered %d/%d (approx. %.2f%%)' %\ - (n, total, n*100.0/total)) - - def fix_names(self): - # it could happen that two distinct graph have the same name; - # here we assign an unique name to each graph. - names = set() - for graph in self.translator.graphs: - base_name = graph.name - i = 0 - while graph.name in names: - graph.name = '%s_%d' % (base_name, i) - i+=1 - names.add(graph.name) - - def create_assembler(self): - raise NotImplementedError diff --git a/rpython/translator/oosupport/metavm.py b/rpython/translator/oosupport/metavm.py deleted file mode 100644 --- a/rpython/translator/oosupport/metavm.py +++ /dev/null @@ -1,525 +0,0 @@ - -""" -Varius microopcodes for different ootypesystem based backends - -These microopcodes are used to translate from the ootype operations to -the operations of a particular backend. For an example, see -cli/opcodes.py which maps from ootype opcodes to sets of metavm -instructions. - -See the MicroInstruction class for discussion on the methods of a -micro-op. -""" - -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.extfunc import ExtFuncEntry, is_external - -class Generator(object): - - def add_comment(self, text): - """ - Called w/in a function w/ a text string that could be - usefully added to the output. - """ - pass - - def add_section(self, text): - """ - Prints a distinguished comment - """ - self.add_comment("_" * 70) - self.add_comment(text) - - def pop(self, TYPE): - """ Pops a value off the top of the stack, which is of the - given TYPE. - - Stack: val, ... -> ...""" - raise NotImplementedError - - def dup(self, TYPE): - """ Duplicates the top of the stack, which is of the given TYPE. - - Stack: val, ... -> val, val, ...""" - raise NotImplementedError - - def emit(self, instr, *args): - """ - Invoked by InstructionList.render() when we encounter a - non-MicroInstruction in the list of instructions. This is - typically used to encode small single operands as strings. - """ - pass - - def load(self, v): - """ - Loads an item 'v' onto the stack - - Stack: ... -> v, ... - """ - pass - - def store(self, v): - """ - Stores an item from the stack into 'v' - - Stack: value, ... -> ... - """ - pass - - def set_field(self, CONCRETETYPE, fieldname): - """ - Stores a value into a field. - - 'CONCRETETYPE' should be the type of the class that has the field - 'fieldname' is a string with the name of the field - - Stack: value, item, ... -> ... - """ - raise NotImplementedError - - def get_field(self, CONCRETETYPE, fieldname): - """ - Gets a value from a specified field. 
- - 'CONCRETETYPE' should be the type of the class that has the field - 'fieldname' is the name of the field - - Stack: item, ... -> ... - """ - raise NotImplementedError - - def downcast(self, TYPE): - """ - Casts the object on the top of the stack to be of the specified - ootype. Assumed to raise an exception on failure. - - Stack: obj, ... -> obj, ... - """ - raise NotImplementedError - - def getclassobject(self, OOINSTANCE): - """ - Gets the class object for the OOINSTANCE. The type of the class - object will depend on the backend, of course; for example in JVM - it is java.lang.Class. - """ - raise NotImplementedError - - def instantiate(self): - """ - Instantiates an instance of the Class object that is on top of - the stack. Class objects refers to an object representing a - class. Used to implement RuntimeNew. - - Stack: class_obj, ... -> instance_obj, ... - """ - raise NotImplementedError - - def instanceof(self, TYPE): - """ - Determines whether the object on the top of the stack is an - instance of TYPE (an ootype). - - Stack: obj, ... -> boolean, ... - """ - pass - - def branch_unconditionally(self, target_label): - """ Branches to target_label unconditionally """ - raise NotImplementedError - - def branch_conditionally(self, iftrue, target_label): - """ Branches to target_label depending on the value on the top of - the stack. If iftrue is True, then the branch occurs if the value - on top of the stack is true; if iftrue is false, then the branch - occurs if the value on the top of the stack is false - - Stack: cond, ... -> ... """ - raise NotImplementedError - - def branch_if_equal(self, target_label): - """ - Pops two values from the stack and branches to target_label if - they are equal. - - Stack: obj1, obj2, ... -> ... - """ - raise NotImplementedError - - def call_graph(self, graph): - """ Invokes the function corresponding to the given graph. The - arguments to the graph have already been pushed in order - (i.e., first argument pushed first, etc). Pushes the return - value. - - Stack: argN...arg2, arg1, arg0, ... -> ret, ... """ - raise NotImplementedError - - def prepare_generic_argument(self, ITEMTYPE): - """ - Invoked after a generic argument has been pushed onto the stack. - May not need to do anything, but some backends, *cough*Java*cough*, - require boxing etc. - """ - return # by default do nothing - - def call_method(self, OOCLASS, method_name): - """ Invokes the given method on the object on the stack. The - this ptr and all arguments have already been pushed. - - Stack: argN, arg2, arg1, this, ... -> ret, ... """ - raise NotImplementedError - - def prepare_call_primitive(self, op, module, name): - """ see call_primitive: by default does nothing """ - pass - - def call_primitive(self, op, module, name): - """ Like call_graph, but it has been suggested that the method be - rendered as a primitive. The full sequence for invoking a primitive: - - self.prepare_call_primitive(op, module, name) - for each arg: self.load(arg) - self.call_primitive(op, module, name) - - Stack: argN...arg2, arg1, arg0, ... -> ret, ... """ - raise NotImplementedError - - def prepare_call_oostring(self, OOTYPE): - " see call_oostring " - pass - - def call_oostring(self, OOTYPE): - """ Invoked for the oostring opcode with both operands - (object, int base) already pushed onto the stack. 
- prepare_call_oostring() is invoked before the operands are - pushed.""" - raise NotImplementedError - - def prepare_call_oounicode(self, OOTYPE): - " see call_oounicode " - pass - - def call_oounicode(self, OOTYPE): - """ Invoked for the oounicode opcode with the operand already - pushed onto the stack. prepare_call_oounicode() is invoked - before the operand is pushed. """ - raise NotImplementedError - - def new(self, TYPE): - """ Creates a new object of the given type. - - Stack: ... -> newobj, ... """ - raise NotImplementedError - - def oonewarray(self, TYPE, length): - """ Creates a new array of the given type with the given length. - - Stack: ... -> newobj, ... """ - raise NotImplementedError - - - def push_null(self, TYPE): - """ Push a NULL value onto the stack (the NULL value represents - a pointer to an instance of OOType TYPE, if it matters to you). """ - raise NotImplementedError - - def push_primitive_constant(self, TYPE, value): - """ Push an instance of TYPE onto the stack with the given - value. TYPE will be one of the types enumerated in - oosupport.constant.PRIMITIVE_TYPES. value will be its - corresponding ootype implementation. """ - raise NotImplementedError - - def get_instrution_count(self): - """ - Return the number of opcodes in the current function, or -1 - if the backend doesn't care about it. Default is -1 - """ - return -1 - -class InstructionList(list): - def render(self, generator, op): - for instr in self: - if isinstance(instr, MicroInstruction): - instr.render(generator, op) - else: - generator.emit(instr) - - def __call__(self, *args): - return self.render(*args) - - -class MicroInstruction(object): - def render(self, generator, op): - """ - Generic method which emits code to perform this microinstruction. - - 'generator' -> the class which generates actual code emitted - 'op' -> the instruction from the FlowIR - """ - pass - - def __str__(self): - return self.__class__.__name__ - - def __call__(self, *args): - return self.render(*args) - -class _DoNothing(MicroInstruction): - def render(self, generator, op): - pass - -class PushArg(MicroInstruction): - """ Pushes a given operand onto the stack. """ - def __init__(self, n): - self.n = n - - def render(self, generator, op): - generator.load(op.args[self.n]) - -class _PushAllArgs(MicroInstruction): - """ Pushes all arguments of the instruction onto the stack in order. 
""" - def __init__(self, slice=None): - """ Eventually slice args - """ - self.slice = slice - - def render(self, generator, op): - if self.slice is not None: - args = op.args[self.slice] - else: - args = op.args - for arg in args: - generator.load(arg) - -class PushPrimitive(MicroInstruction): - def __init__(self, TYPE, value): - self.TYPE = TYPE - self.value = value - - def render(self, generator, op): - generator.push_primitive_constant(self.TYPE, self.value) - -class _StoreResult(MicroInstruction): - def render(self, generator, op): - generator.store(op.result) - -class _SetField(MicroInstruction): - def render(self, generator, op): - this, field, value = op.args -## if field.value == 'meta': -## return # TODO - - if value.concretetype is ootype.Void: - return - generator.load(this) - generator.load(value) - generator.set_field(this.concretetype, field.value) - -class _GetField(MicroInstruction): - def render(self, generator, op): - # OOType produces void values on occassion that can safely be ignored - if op.result.concretetype is ootype.Void: - return - this, field = op.args - generator.load(this) - generator.get_field(this.concretetype, field.value) - -class _DownCast(MicroInstruction): - """ Push the argument op.args[0] and cast it to the desired type, leaving - result on top of the stack. """ - def render(self, generator, op): - RESULTTYPE = op.result.concretetype - generator.load(op.args[0]) - generator.downcast(RESULTTYPE) - -class _InstanceOf(MicroInstruction): - """ Push the argument op.args[0] and cast it to the desired type, leaving - result on top of the stack. """ - def render(self, generator, op): - RESULTTYPE = op.result.concretetype - generator.load(op.args[0]) - generator.instanceof(RESULTTYPE) - -# There are three distinct possibilities where we need to map call differently: -# 1. Object is marked with rpython_hints as a builtin, so every attribut access -# and function call goes as builtin -# 2. Function called is a builtin, so it might be mapped to attribute access, builtin function call -# or even method call -# 3. 
Object on which method is called is primitive object and method is mapped to some -# method/function/attribute access -class _GeneralDispatcher(MicroInstruction): - def __init__(self, builtins, class_map): - self.builtins = builtins - self.class_map = class_map - - def render(self, generator, op): - raise NotImplementedError("pure virtual class") - - def check_builtin(self, this): - if not isinstance(this, ootype.Instance): - return False - return this._hints.get('_suggested_external') - -class _MethodDispatcher(_GeneralDispatcher): - def render(self, generator, op): - method = op.args[0].value - this = op.args[1].concretetype - if self.check_builtin(this): - return self.class_map['CallBuiltinObject'].render(generator, op) - try: - self.builtins.builtin_obj_map[this.__class__][method](generator, op) - except KeyError: - return self.class_map['CallMethod'].render(generator, op) - -class _CallDispatcher(_GeneralDispatcher): - def render(self, generator, op): - func = op.args[0] - # XXX we need to sort out stuff here at some point - if is_external(func): - func_name = func.value._name.split("__")[0] - try: - return self.builtins.builtin_map[func_name](generator, op) - except KeyError: - return self.class_map['CallBuiltin'](func_name)(generator, op) - return self.class_map['Call'].render(generator, op) - -class _GetFieldDispatcher(_GeneralDispatcher): - def render(self, generator, op): - if self.check_builtin(op.args[0].concretetype): - return self.class_map['GetBuiltinField'].render(generator, op) - else: - return self.class_map['GetField'].render(generator, op) - -class _SetFieldDispatcher(_GeneralDispatcher): - def render(self, generator, op): - if self.check_builtin(op.args[0].concretetype): - return self.class_map['SetBuiltinField'].render(generator, op) - else: - return self.class_map['SetField'].render(generator, op) - -class _New(MicroInstruction): - def render(self, generator, op): - try: - op.args[0].value._hints['_suggested_external'] - generator.ilasm.new(op.args[0].value._name.split('.')[-1]) - except (KeyError, AttributeError): - if op.args[0].value is ootype.Void: - return - generator.new(op.args[0].value) - - -class _OONewArray(MicroInstruction): - def render(self, generator, op): - if op.args[0].value is ootype.Void: - return - generator.oonewarray(op.args[0].value, op.args[1]) - - -class BranchUnconditionally(MicroInstruction): - def __init__(self, label): - self.label = label - def render(self, generator, op): - generator.branch_unconditionally(self.label) - -class BranchIfTrue(MicroInstruction): - def __init__(self, label): - self.label = label - def render(self, generator, op): - generator.branch_conditionally(True, self.label) - -class BranchIfFalse(MicroInstruction): - def __init__(self, label): - self.label = label - def render(self, generator, op): - generator.branch_conditionally(False, self.label) - - -def get_primitive_name(sm): - try: - sm.graph - return None - except AttributeError: - pass - try: - return 'rffi', sm._obj.oo_primitive - except AttributeError: - pass - return sm._name.rsplit('.', 1) - -class _Call(MicroInstruction): - - def render(self, generator, op): - callee = op.args[0].value - is_primitive = get_primitive_name(callee) - - if is_primitive: - module, name = is_primitive - generator.prepare_call_primitive(op, module, name) - - for arg in op.args[1:]: - generator.load(arg) - - if is_primitive: - generator.call_primitive(op, module, name) - else: - generator.call_graph(callee.graph) - - -class _CallMethod(MicroInstruction): - def render(self, 
generator, op): - method = op.args[0] # a FlowConstant string... - this = op.args[1] - for arg in op.args[1:]: - generator.load(arg) - generator.call_method(this.concretetype, method.value) - -class _RuntimeNew(MicroInstruction): - def render(self, generator, op): - generator.load(op.args[0]) - generator.instantiate() - generator.downcast(op.result.concretetype) - -class _OOString(MicroInstruction): - def render(self, generator, op): - ARGTYPE = op.args[0].concretetype - generator.prepare_call_oostring(ARGTYPE) - generator.load(op.args[0]) - generator.load(op.args[1]) - generator.call_oostring(ARGTYPE) - -class _OOUnicode(MicroInstruction): - def render(self, generator, op): - v_base = op.args[1] - assert v_base.value == -1, "The second argument of oounicode must be -1" - - ARGTYPE = op.args[0].concretetype - generator.prepare_call_oounicode(ARGTYPE) - generator.load(op.args[0]) - generator.call_oounicode(ARGTYPE) - -class _CastTo(MicroInstruction): - def render(self, generator, op): - generator.load(op.args[0]) - INSTANCE = op.args[1].value - class_name = generator.db.pending_class(INSTANCE) - generator.isinstance(class_name) - -New = _New() -OONewArray = _OONewArray() - -PushAllArgs = _PushAllArgs() -StoreResult = _StoreResult() -SetField = _SetField() -GetField = _GetField() -DownCast = _DownCast() -DoNothing = _DoNothing() -Call = _Call() -CallMethod = _CallMethod() -RuntimeNew = _RuntimeNew() -OOString = _OOString() -OOUnicode = _OOUnicode() -CastTo = _CastTo() - diff --git a/rpython/translator/oosupport/support.py b/rpython/translator/oosupport/support.py deleted file mode 100644 --- a/rpython/translator/oosupport/support.py +++ /dev/null @@ -1,36 +0,0 @@ -NT_OS = dict( - O_RDONLY = 0x0000, - O_WRONLY = 0x0001, - O_RDWR = 0x0002, - O_APPEND = 0x0008, - O_CREAT = 0x0100, - O_TRUNC = 0x0200, - O_TEXT = 0x4000, - O_BINARY = 0x8000 - ) - -def _patch_os(defs=None): - """ - Modify the value of some attributes of the os module to be sure - they are the same on every platform pypy is compiled on. Returns a - dictionary containing the original values that can be passed to - patch_os to rollback to the original values. 
- """ - - import os - if defs is None: - defs = NT_OS - olddefs = {} - for name, value in defs.iteritems(): - try: - olddefs[name] = getattr(os, name) - except AttributeError: - pass - setattr(os, name, value) - return olddefs - -def patch_os(): - return _patch_os() - From noreply at buildbot.pypy.org Sun Jul 7 16:20:31 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 16:20:31 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Fix llinterp Message-ID: <20130707142031.489D61C13AA@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65256:cd048265c9ab Date: 2013-07-07 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/cd048265c9ab/ Log: Fix llinterp diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -241,16 +241,6 @@ assert False, "type error: %r val from %r var/const" % (lltype.typeOf(val), varorconst.concretetype) return val - def getval_or_subop(self, varorsubop): - from rpython.translator.oosupport.treebuilder import SubOperation - if isinstance(varorsubop, SubOperation): - self.eval_operation(varorsubop.op) - resultval = self.getval(varorsubop.op.result) - del self.bindings[varorsubop.op.result] # XXX hack - return resultval - else: - return self.getval(varorsubop) - # _______________________________________________________ # other helpers def getoperationhandler(self, opname): @@ -406,7 +396,7 @@ if getattr(ophandler, 'specialform', False): retval = ophandler(*operation.args) else: - vals = [self.getval_or_subop(x) for x in operation.args] + vals = [self.getval(x) for x in operation.args] if getattr(ophandler, 'need_result_type', False): vals.insert(0, operation.result.concretetype) try: From noreply at buildbot.pypy.org Sun Jul 7 16:46:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 7 Jul 2013 16:46:42 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: fix rpython/translator/ Message-ID: <20130707144642.BE9581C00B9@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65257:4a56712ac399 Date: 2013-07-07 16:45 +0200 http://bitbucket.org/pypy/pypy/changeset/4a56712ac399/ Log: fix rpython/translator/ diff --git a/rpython/translator/test/test_exceptiontransform.py b/rpython/translator/test/test_exceptiontransform.py --- a/rpython/translator/test/test_exceptiontransform.py +++ b/rpython/translator/test/test_exceptiontransform.py @@ -26,8 +26,12 @@ _already_transformed[t] = True return interp.eval_graph(graph, values) -class BaseTestExceptionTransform: - type_system = None +class TestExceptionTransform: + type_system = 'lltype' + + def compile(self, fn, inputargs): + from rpython.translator.c.test.test_genc import compile + return compile(fn, inputargs) def transform_func(self, fn, inputtypes, backendopt=False): t = TranslationContext() @@ -45,9 +49,6 @@ t.view() return t, g - def compile(self, fn, inputargs): - raise NotImplementedError - def test_simple(self): def one(): return 1 @@ -57,7 +58,7 @@ return one() t, g = self.transform_func(foo, []) - assert len(list(g.iterblocks())) == 2 # graph does not change + assert len(list(g.iterblocks())) == 2 # graph does not change result = interpret(foo, []) assert result == 1 f = self.compile(foo, []) @@ -176,7 +177,7 @@ t.buildrtyper(type_system=self.type_system).specialize() g = graphof(t, f) etrafo = exceptiontransform.ExceptionTransformer(t) - etrafo.create_exception_handling(g) + etrafo.create_exception_handling(g) assert 
etrafo.raise_analyzer.analyze_direct_call(g) def test_reraise_is_not_raise(self): @@ -207,14 +208,6 @@ result = f(1) assert result == -42 - -class TestLLType(BaseTestExceptionTransform): - type_system = 'lltype' - - def compile(self, fn, inputargs): - from rpython.translator.c.test.test_genc import compile - return compile(fn, inputargs) - def test_needs_keepalive(self): check_debug_build() from rpython.rtyper.lltypesystem import lltype @@ -279,11 +272,3 @@ f = self.compile(foo, []) res = f() assert res == 42 - - -class TestOOType(BaseTestExceptionTransform): - type_system = 'ootype' - - def compile(self, fn, inputargs): - from rpython.translator.cli.test.runtest import compile_function - return compile_function(fn, inputargs, auto_raise_exc=True, exctrans=True) diff --git a/rpython/translator/test/test_interactive.py b/rpython/translator/test/test_interactive.py --- a/rpython/translator/test/test_interactive.py +++ b/rpython/translator/test/test_interactive.py @@ -9,7 +9,7 @@ t = Translation(f, [int, int]) assert t.context is t.driver.translator assert t.config is t.driver.config is t.context.config - + s = t.annotate() assert s.knowntype == int @@ -27,7 +27,7 @@ t.annotate() t.rtype() - assert 'rtype_lltype' in t.driver.done + assert 'rtype_lltype' in t.driver.done def test_simple_backendopt(): def f(x, y): @@ -35,7 +35,7 @@ t = Translation(f, [int, int], backend='c') t.backendopt() - + assert 'backendopt_lltype' in t.driver.done def test_simple_source(): @@ -53,7 +53,7 @@ t = Translation(f, [int, int]) py.test.raises(Exception, "t.source()") - + def test_disable_logic(): def f(x,y): @@ -67,7 +67,7 @@ def test_simple_compile_c(): import ctypes - + def f(x,y): return x+y @@ -87,28 +87,7 @@ t = Translation(f, [int, int]) t.rtype(type_system='lltype') - assert 'rtype_lltype' in t.driver.done - - t = Translation(f, [int, int]) - t.rtype(type_system='ootype') - assert 'rtype_ootype' in t.driver.done - - t = Translation(f, [int, int], type_system='ootype') - t.rtype() - assert 'rtype_ootype' in t.driver.done - - t = Translation(f, [int, int]) - t.rtype(backend='cli') - assert 'rtype_ootype' in t.driver.done - - - t = Translation(f, [int, int], backend='cli', type_system='ootype') - t.rtype() - assert 'rtype_ootype' in t.driver.done - - t = Translation(f, [int, int], type_system='lltype') - t.annotate() - py.test.raises(Exception, "t.rtype(backend='cli')") + assert 'rtype_lltype' in t.driver.done t = Translation(f, [int, int], backend='cli') t.annotate() diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -164,7 +164,7 @@ return l[j] def external_function(): return os.system("ls") - graph, t = translate(list_basic_ops, [int, int], False) + graph, t = translate(list_basic_ops, [int, int], False) for block in graph.iterblocks(): for op in block.operations: if op.opname == "direct_call": @@ -175,7 +175,7 @@ # a call to a wrapper function which itself contains the # real call to a graph-less external ll function, so # we check recursively - graph, t = translate(external_function, [], False) + graph, t = translate(external_function, [], False) found = [] def walkgraph(graph): for block in graph.iterblocks(): @@ -196,7 +196,7 @@ def test_huge_func(): g = None gstring = "def g(x):\n%s%s" % (" x = x + 1\n" * 1000, " return x\n") - exec gstring + exec gstring assert g(1) == 1001 # does not crash: previously join_blocks would barf on this graph, t = translate(g, 
[int]) @@ -308,7 +308,7 @@ def test_iterate_over_list(self): def wrap(elem): return elem - + def f(i): new_l = [] l = range(4) @@ -324,7 +324,7 @@ 'getattr': 1, 'simple_call': 3, }) - + class TestLLSpecializeListComprehension: typesystem = 'lltype' @@ -442,7 +442,3 @@ interp, graph = self.specialize(main, [int]) res = interp.eval_graph(graph, [10]) assert res == 5 * 17 - - -class TestOOSpecializeListComprehension(TestLLSpecializeListComprehension): - typesystem = 'ootype' diff --git a/rpython/translator/test/test_unsimplify.py b/rpython/translator/test/test_unsimplify.py --- a/rpython/translator/test/test_unsimplify.py +++ b/rpython/translator/test/test_unsimplify.py @@ -26,7 +26,7 @@ interp = LLInterpreter(t.rtyper) result = interp.eval_graph(graph, [1, 2]) assert result == 5 - + def test_split_blocks_conditional(): for i in range(3): def f(x, y): @@ -73,38 +73,38 @@ def test_call_initial_function(): tmpfile = str(udir.join('test_call_initial_function')) - for type_system in ['lltype', 'ootype']: - def f(x): - return x * 6 - def hello_world(): - if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) - os.close(fd) - graph, t = translate(f, [int], type_system) - call_initial_function(t, hello_world) - # - if os.path.exists(tmpfile): - os.unlink(tmpfile) - interp = LLInterpreter(t.rtyper) - result = interp.eval_graph(graph, [7]) - assert result == 42 - assert os.path.isfile(tmpfile) + type_system = 'lltype' + def f(x): + return x * 6 + def hello_world(): + if we_are_translated(): + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) + os.close(fd) + graph, t = translate(f, [int], type_system) + call_initial_function(t, hello_world) + # + if os.path.exists(tmpfile): + os.unlink(tmpfile) + interp = LLInterpreter(t.rtyper) + result = interp.eval_graph(graph, [7]) + assert result == 42 + assert os.path.isfile(tmpfile) def test_call_final_function(): tmpfile = str(udir.join('test_call_final_function')) - for type_system in ['lltype', 'ootype']: - def f(x): - return x * 6 - def goodbye_world(): - if we_are_translated(): - fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) - os.close(fd) - graph, t = translate(f, [int], type_system) - call_final_function(t, goodbye_world) - # - if os.path.exists(tmpfile): - os.unlink(tmpfile) - interp = LLInterpreter(t.rtyper) - result = interp.eval_graph(graph, [7]) - assert result == 42 - assert os.path.isfile(tmpfile) + type_system = 'lltype' + def f(x): + return x * 6 + def goodbye_world(): + if we_are_translated(): + fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) + os.close(fd) + graph, t = translate(f, [int], type_system) + call_final_function(t, goodbye_world) + # + if os.path.exists(tmpfile): + os.unlink(tmpfile) + interp = LLInterpreter(t.rtyper) + result = interp.eval_graph(graph, [7]) + assert result == 42 + assert os.path.isfile(tmpfile) From noreply at buildbot.pypy.org Sun Jul 7 17:51:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 7 Jul 2013 17:51:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Typo (thanks Skip) Message-ID: <20130707155159.F29DF1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65258:ea7a73c5a2ba Date: 2013-07-07 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/ea7a73c5a2ba/ Log: Typo (thanks Skip) diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -319,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. 
It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. From noreply at buildbot.pypy.org Mon Jul 8 10:08:00 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 8 Jul 2013 10:08:00 +0200 (CEST) Subject: [pypy-commit] stmgc default: add division Message-ID: <20130708080800.749D71C335D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r366:b725847dbf48 Date: 2013-07-08 10:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/b725847dbf48/ Log: add division diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -196,6 +196,24 @@ return DuInt_FromInt(result); } +DuObject *du_div(DuObject *cons, DuObject *locals) +{ + int result = 1; + while (cons != Du_None) { + _du_read1(cons); + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result /= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} + static DuObject *_du_intcmp(DuObject *cons, DuObject *locals, int mode) { DuObject *obj_a, *obj_b; @@ -575,6 +593,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "+", du_add); DuFrame_SetBuiltinMacro(Du_Globals, "-", du_sub); DuFrame_SetBuiltinMacro(Du_Globals, "*", du_mul); + DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); DuFrame_SetBuiltinMacro(Du_Globals, "<", du_lt); DuFrame_SetBuiltinMacro(Du_Globals, "<=", du_le); DuFrame_SetBuiltinMacro(Du_Globals, "==", du_eq); From noreply at buildbot.pypy.org Mon Jul 8 10:08:02 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 8 Jul 2013 10:08:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge Message-ID: <20130708080802.86FC51C335E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r367:158be99cc7db Date: 2013-07-08 10:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/158be99cc7db/ Log: merge diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -16,10 +16,10 @@ H_FILES = atomic_ops.h stmgc.h stmimpl.h \ et.h lists.h steal.h nursery.h gcpage.h \ - stmsync.h dbgmem.h fprintcolor.h + stmsync.h extra.h dbgmem.h fprintcolor.h C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ - stmsync.c dbgmem.c fprintcolor.c + stmsync.c extra.c dbgmem.c fprintcolor.c DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -248,6 +248,36 @@ } } +gcptr stm_RepeatReadBarrier(gcptr P) +{ + /* Version of stm_DirectReadBarrier() that doesn't abort and assumes + * that 'P' was already an up-to-date result of a previous + * stm_DirectReadBarrier(). We only have to check if we did in the + * meantime a stm_write_barrier(). 
+ */ + if (P->h_tid & GCFLAG_PUBLIC) + { + if (P->h_tid & GCFLAG_NURSERY_MOVED) + { + P = (gcptr)P->h_revision; + assert(P->h_tid & GCFLAG_PUBLIC); + } + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + { + struct tx_descriptor *d = thread_descriptor; + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + no_private_obj: + ; + } + } + assert(!(P->h_tid & GCFLAG_STUB)); + return P; +} + static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj, int from_stolen) { @@ -422,29 +452,6 @@ goto restart_all; } -#if 0 -void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset) -{ - return _direct_read_barrier((gcptr)G1, (gcptr)R_Container1, offset); -} -#endif - -gcptr stm_RepeatReadBarrier(gcptr O) -{ - abort();//XXX -#if 0 - // LatestGlobalRevision(O) would either return O or abort - // the whole transaction, so omitting it is not wrong - struct tx_descriptor *d = thread_descriptor; - gcptr L; - wlog_t *entry; - G2L_FIND(d->global_to_local, O, entry, return O); - L = entry->val; - assert(L->h_revision == stm_local_revision); - return L; -#endif -} - static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; @@ -749,10 +756,10 @@ smp_spinloop(); } -#if 0 -size_t _stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, - int abort_reason, char *output); -#endif +void stm_abort_and_retry(void) +{ + AbortTransaction(ABRT_MANUAL); +} void AbortPrivateFromProtected(struct tx_descriptor *d); @@ -795,41 +802,24 @@ elapsed_time = 1; } -#if 0 - size_t size; if (elapsed_time >= d->longest_abort_info_time) { /* decode the 'abortinfo' and produce a human-readable summary in the string 'longest_abort_info' */ - size = _stm_decode_abort_info(d, elapsed_time, num, NULL); + size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL); free(d->longest_abort_info); d->longest_abort_info = malloc(size); if (d->longest_abort_info == NULL) d->longest_abort_info_time = 0; /* out of memory! */ else { - if (_stm_decode_abort_info(d, elapsed_time, + if (stm_decode_abort_info(d, elapsed_time, num, d->longest_abort_info) != size) stm_fatalerror("during stm abort: object mutated unexpectedly\n"); d->longest_abort_info_time = elapsed_time; } } -#endif - -#if 0 - /* run the undo log in reverse order, cancelling the values set by - stm_ThreadLocalRef_LLSet(). */ - if (d->undolog.size > 0) { - gcptr *item = d->undolog.items; - long i; - for (i=d->undolog.size; i>=0; i-=2) { - void **addr = (void **)(item[i-2]); - void *oldvalue = (void *)(item[i-1]); - *addr = oldvalue; - } - } -#endif /* upon abort, set the reads size limit to 94% of how much was read so far. 
This should ensure that, assuming the retry does the same @@ -936,10 +926,7 @@ d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); -#if 0 - gcptrlist_clear(&d->undolog); gcptrlist_clear(&d->abortinfo); -#endif } void BeginTransaction(jmp_buf* buf) @@ -1496,17 +1483,6 @@ /************************************************************/ -#if 0 -void stm_ThreadLocalRef_LLSet(void **addr, void *newvalue) -{ - struct tx_descriptor *d = thread_descriptor; - gcptrlist_insert2(&d->undolog, (gcptr)addr, (gcptr)*addr); - *addr = newvalue; -} -#endif - -/************************************************************/ - struct tx_descriptor *stm_tx_head = NULL; struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; static revision_t descriptor_array_free_list = 0; @@ -1635,11 +1611,8 @@ assert(d->private_from_protected.size == 0); gcptrlist_delete(&d->private_from_protected); gcptrlist_delete(&d->list_of_read_objects); -#if 0 gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); - gcptrlist_delete(&d->undolog); -#endif int num_aborts = 0, num_spinloops = 0; char line[256], *p = line; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -152,9 +152,9 @@ unsigned int num_aborts[ABORT_REASONS]; unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; - //struct GcPtrList abortinfo; struct GcPtrList private_from_protected; struct G2L public_to_private; + struct GcPtrList abortinfo; char *longest_abort_info; long long longest_abort_info_time; revision_t *private_revision_ref; diff --git a/c4/extra.c b/c4/extra.c new file mode 100644 --- /dev/null +++ b/c4/extra.c @@ -0,0 +1,263 @@ +#include "stmimpl.h" + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id) +{ + //assert(!is_in_nursery(thread_descriptor, id)); + assert(id->h_tid & GCFLAG_OLD); + + size_t size = stmgc_size(obj); + memcpy(id, obj, size); + id->h_tid &= ~GCFLAG_HAS_ID; + id->h_tid |= GCFLAG_OLD; + dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); +} + +/************************************************************/ +/* Each object has a h_original pointer to an old copy of + the same object (e.g. an old revision), the "original". + The memory location of this old object is used as the ID + for this object. If h_original is NULL *and* it is an + old object copy, it itself is the original. This invariant + must be upheld by all code dealing with h_original. + The original copy must never be moved again. Also, it may + be just a stub-object. + + If we want the ID of an object which is still young, + we must preallocate an old shadow-original that is used + as the target of the young object in a minor collection. + In this case, we set the HAS_ID flag on the young obj + to notify minor_collect. + This flag can be lost if the young obj is stolen. Then + the stealing thread uses the shadow-original itself and + minor_collect must not overwrite it again. + Also, if there is already a backup-copy around, we use + this instead of allocating another old object to use as + the shadow-original. + */ + +static revision_t mangle_hash(revision_t n) +{ + /* To hash pointers in dictionaries. Assumes that i shows some + alignment (to 4, 8, maybe 16 bytes), so we use the following + formula to avoid the trailing bits being always 0. + This formula is reversible: two different values of 'i' will + always give two different results. + */ + return n ^ (((urevision_t)n) >> 4); +} + + +revision_t stm_hash(gcptr p) +{ + /* Prebuilt objects may have a specific hash stored in an extra + field. 
For now, we will simply always follow h_original and + see, if it is a prebuilt object (XXX: maybe propagate a flag + to all copies of a prebuilt to avoid this cache miss). + */ + if (p->h_original) { + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + return p->h_original; + } + gcptr orig = (gcptr)p->h_original; + if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { + return orig->h_original; + } + } + return mangle_hash(stm_id(p)); +} + + +revision_t stm_id(gcptr p) +{ + struct tx_descriptor *d = thread_descriptor; + revision_t result; + + if (p->h_original) { /* fast path */ + if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { + /* h_original may contain a specific hash value, + but in case of the prebuilt original version, + its memory location is the id */ + return (revision_t)p; + } + + dprintf(("stm_id(%p) has orig fst: %p\n", + p, (gcptr)p->h_original)); + return p->h_original; + } + else if (p->h_tid & GCFLAG_OLD) { + /* old objects must have an h_original xOR be + the original itself. */ + dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); + return (revision_t)p; + } + + spinlock_acquire(d->public_descriptor->collection_lock, 'I'); + /* old objects must have an h_original xOR be + the original itself. + if some thread stole p when it was still young, + it must have set h_original. stealing an old obj + makes the old obj "original". + */ + if (p->h_original) { /* maybe now? */ + result = p->h_original; + dprintf(("stm_id(%p) has orig: %p\n", + p, (gcptr)p->h_original)); + } + else { + /* must create shadow original object XXX: or use + backup, if exists */ + + /* XXX use stmgcpage_malloc() directly, we don't need to copy + * the contents yet */ + gcptr O = stmgc_duplicate_old(p); + p->h_original = (revision_t)O; + p->h_tid |= GCFLAG_HAS_ID; + + if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + gcptr B = (gcptr)p->h_revision; + B->h_original = (revision_t)O; + } + + result = (revision_t)O; + dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); + } + + spinlock_release(d->public_descriptor->collection_lock); + return result; +} + +_Bool stm_pointer_equal(gcptr p1, gcptr p2) +{ + /* fast path for two equal pointers */ + if (p1 == p2) + return 1; + /* if p1 or p2 is NULL (but not both, because they are different + pointers), then return 0 */ + if (p1 == NULL || p2 == NULL) + return 0; + /* types must be the same */ + if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) + return 0; + return stm_id(p1) == stm_id(p2); +} + +/************************************************************/ + +void stm_abort_info_push(gcptr obj, long fieldoffsets[]) +{ + struct tx_descriptor *d = thread_descriptor; + obj = stm_read_barrier(obj); + gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets); +} + +void stm_abort_info_pop(long count) +{ + struct tx_descriptor *d = thread_descriptor; + long newsize = d->abortinfo.size - 2 * count; + gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize); +} + +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output) +{ + /* re-encodes the abort info as a single string. + For convenience (no escaping needed, no limit on integer + sizes, etc.) we follow the bittorrent format. 
*/ + size_t totalsize = 0; + long i; + char buffer[32]; + size_t res_size; +#define WRITE(c) { totalsize++; if (output) *output++=(c); } +#define WRITE_BUF(p, sz) { totalsize += (sz); \ + if (output) { \ + memcpy(output, (p), (sz)); output += (sz); \ + } \ + } + WRITE('l'); + WRITE('l'); + res_size = sprintf(buffer, "i%llde", (long long)elapsed_time); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)abort_reason); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lde", (long)d->atomic); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%de", (int)d->active); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads); + WRITE_BUF(buffer, res_size); + res_size = sprintf(buffer, "i%lue", + (unsigned long)d->reads_size_limit_nonatomic); + WRITE_BUF(buffer, res_size); + WRITE('e'); + for (i=0; i<d->abortinfo.size; i+=2) { + char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]); + long *fieldoffsets = (long*)d->abortinfo.items[i+1]; + long kind, offset; + size_t rps_size; + char *rps; + + while (1) { + kind = *fieldoffsets++; + if (kind <= 0) { + if (kind == -2) { + WRITE('l'); /* '[', start of sublist */ + continue; + } + if (kind == -1) { + WRITE('e'); /* ']', end of sublist */ + continue; + } + break; /* 0, terminator */ + } + offset = *fieldoffsets++; + switch(kind) { + case 1: /* signed */ + res_size = sprintf(buffer, "i%lde", + *(long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 2: /* unsigned */ + res_size = sprintf(buffer, "i%lue", + *(unsigned long*)(object + offset)); + WRITE_BUF(buffer, res_size); + break; + case 3: /* a string of bytes from the target object */ + rps = *(char **)(object + offset); + offset = *fieldoffsets++; + if (rps) { + /* xxx a bit ad-hoc: it's a string whose length is a + * long at 'offset', following immediately the offset */ + rps_size = *(long *)(rps + offset); + offset += sizeof(long); + assert(rps_size >= 0); + res_size = sprintf(buffer, "%zu:", rps_size); + WRITE_BUF(buffer, res_size); + WRITE_BUF(rps + offset, rps_size); + } + else { + WRITE_BUF("0:", 2); + } + break; + default: + stm_fatalerror("corrupted abort log\n"); + } + } + } + WRITE('e'); + WRITE('\0'); /* final null character */ +#undef WRITE +#undef WRITE_BUF + return totalsize; +} + +char *stm_inspect_abort_info(void) +{ + struct tx_descriptor *d = thread_descriptor; + if (d->longest_abort_info_time <= 0) + return NULL; + d->longest_abort_info_time = 0; + return d->longest_abort_info; +} diff --git a/c4/extra.h b/c4/extra.h new file mode 100644 --- /dev/null +++ b/c4/extra.h @@ -0,0 +1,9 @@ +#ifndef _SRCSTM_EXTRA_H +#define _SRCSTM_EXTRA_H + + +void stm_copy_to_old_id_copy(gcptr obj, gcptr id); +size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time, + int abort_reason, char *output); + +#endif diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -225,7 +225,8 @@ id_copy->h_tid |= GCFLAG_VISITED; /* XXX: may not always need tracing?
*/ - gcptrlist_insert(&objects_to_trace, id_copy); + //if (!(id_copy->h_tid & GCFLAG_STUB)) + // gcptrlist_insert(&objects_to_trace, id_copy); } else { /* prebuilt originals won't get collected anyway diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -125,131 +125,6 @@ } /************************************************************/ -/* Each object has a h_original pointer to an old copy of - the same object (e.g. an old revision), the "original". - The memory location of this old object is used as the ID - for this object. If h_original is NULL *and* it is an - old object copy, it itself is the original. This invariant - must be upheld by all code dealing with h_original. - The original copy must never be moved again. Also, it may - be just a stub-object. - - If we want the ID of an object which is still young, - we must preallocate an old shadow-original that is used - as the target of the young object in a minor collection. - In this case, we set the HAS_ID flag on the young obj - to notify minor_collect. - This flag can be lost if the young obj is stolen. Then - the stealing thread uses the shadow-original itself and - minor_collect must not overwrite it again. - Also, if there is already a backup-copy around, we use - this instead of allocating another old object to use as - the shadow-original. - */ - -static revision_t mangle_hash(revision_t n) -{ - /* To hash pointers in dictionaries. Assumes that i shows some - alignment (to 4, 8, maybe 16 bytes), so we use the following - formula to avoid the trailing bits being always 0. - This formula is reversible: two different values of 'i' will - always give two different results. - */ - return n ^ (((urevision_t)n) >> 4); -} - - -revision_t stm_hash(gcptr p) -{ - /* Prebuilt objects may have a specific hash stored in an extra - field. For now, we will simply always follow h_original and - see, if it is a prebuilt object (XXX: maybe propagate a flag - to all copies of a prebuilt to avoid this cache miss). - */ - if (p->h_original) { - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - return p->h_original; - } - gcptr orig = (gcptr)p->h_original; - if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) { - return orig->h_original; - } - } - return mangle_hash(stm_id(p)); -} - - -revision_t stm_id(gcptr p) -{ - struct tx_descriptor *d = thread_descriptor; - revision_t result; - - if (p->h_original) { /* fast path */ - if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) { - /* h_original may contain a specific hash value, - but in case of the prebuilt original version, - its memory location is the id */ - return (revision_t)p; - } - - dprintf(("stm_id(%p) has orig fst: %p\n", - p, (gcptr)p->h_original)); - return p->h_original; - } - else if (p->h_tid & GCFLAG_OLD) { - /* old objects must have an h_original xOR be - the original itself. */ - dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p)); - return (revision_t)p; - } - - spinlock_acquire(d->public_descriptor->collection_lock, 'I'); - /* old objects must have an h_original xOR be - the original itself. - if some thread stole p when it was still young, - it must have set h_original. stealing an old obj - makes the old obj "original". - */ - if (p->h_original) { /* maybe now? 
*/ - result = p->h_original; - dprintf(("stm_id(%p) has orig: %p\n", - p, (gcptr)p->h_original)); - } - else { - /* must create shadow original object XXX: or use - backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); - p->h_original = (revision_t)O; - p->h_tid |= GCFLAG_HAS_ID; - - if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - gcptr B = (gcptr)p->h_revision; - B->h_original = (revision_t)O; - } - - result = (revision_t)O; - dprintf(("stm_id(%p) young, make shadow %p\n", p, O)); - } - - spinlock_release(d->public_descriptor->collection_lock); - return result; -} - -_Bool stm_pointer_equal(gcptr p1, gcptr p2) -{ - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); -} - -/************************************************************/ static inline gcptr create_old_object_copy(gcptr obj) { @@ -266,18 +141,6 @@ return fresh_old_copy; } -void copy_to_old_id_copy(gcptr obj, gcptr id) -{ - assert(!is_in_nursery(thread_descriptor, id)); - assert(id->h_tid & GCFLAG_OLD); - - size_t size = stmgc_size(obj); - memcpy(id, obj, size); - id->h_tid &= ~GCFLAG_HAS_ID; - id->h_tid |= GCFLAG_OLD; - dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id)); -} - static void visit_if_young(gcptr *root) { gcptr obj = *root; @@ -303,7 +166,7 @@ /* already has a place to go to */ gcptr id_obj = (gcptr)obj->h_original; - copy_to_old_id_copy(obj, id_obj); + stm_copy_to_old_id_copy(obj, id_obj); fresh_old_copy = id_obj; obj->h_tid &= ~GCFLAG_HAS_ID; } @@ -485,7 +348,7 @@ we may occasionally see a PUBLIC object --- one that was a private/protected object when it was added to old_objects_to_trace, and has been stolen. So we have to - check and not do any change the obj->h_tid in that case. + check and not do any change to the obj->h_tid in that case. Otherwise this conflicts with the rule that we may only modify obj->h_tid of a public object in order to add PUBLIC_TO_PRIVATE. diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -1,8 +1,6 @@ #include "stmimpl.h" -void copy_to_old_id_copy(gcptr obj, gcptr id); - gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { assert(pd->collection_lock != 0); @@ -167,7 +165,7 @@ /* use id-copy for us */ O = (gcptr)L->h_original; L->h_tid &= ~GCFLAG_HAS_ID; - copy_to_old_id_copy(L, O); + stm_copy_to_old_id_copy(L, O); O->h_original = 0; } else { /* Copy the object out of the other thread's nursery, diff --git a/c4/stmgc.c b/c4/stmgc.c --- a/c4/stmgc.c +++ b/c4/stmgc.c @@ -9,5 +9,6 @@ #include "nursery.c" #include "gcpage.c" #include "stmsync.c" +#include "extra.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -101,6 +101,22 @@ It is set to NULL by stm_initialize(). */ extern __thread gcptr stm_thread_local_obj; +/* For tracking where aborts occurs, you can push/pop information + into this stack. When an abort occurs this information is encoded + and flattened into a buffer which can later be retrieved with + stm_inspect_abort_info(). 
(XXX details not documented yet) */ +void stm_abort_info_push(gcptr obj, long fieldoffsets[]); +void stm_abort_info_pop(long count); +char *stm_inspect_abort_info(void); + +/* mostly for debugging support */ +void stm_abort_and_retry(void); +void stm_minor_collect(void); +void stm_major_collect(void); + + +/**************** END OF PUBLIC INTERFACE *****************/ +/************************************************************/ /* macro-like functionality */ diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -12,7 +12,7 @@ # endif #endif -#ifdef _GC_DEBUG +#if defined(_GC_DEBUG) && !defined(DUMP_EXTRA) # if _GC_DEBUG >= 2 # define DUMP_EXTRA # endif @@ -35,5 +35,6 @@ #include "et.h" #include "steal.h" #include "stmsync.h" +#include "extra.h" #endif diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -328,6 +328,18 @@ AbortNowIfDelayed(); /* if another thread ran a major GC */ } +void stm_minor_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(0); +} + +void stm_major_collect(void) +{ + stmgc_minor_collect(); + stmgcpage_possibly_major_collect(1); +} + /************************************************************/ /***** Prebuilt roots, added in the list as the transaction that changed diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -11,11 +11,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in "et.h lists.h steal.h nursery.h gcpage.h " - "stmsync.h dbgmem.h fprintcolor.h " + "stmsync.h extra.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in "et.c lists.c steal.c nursery.c gcpage.c " - "stmsync.c dbgmem.c fprintcolor.c".split()] + "stmsync.c extra.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -65,6 +65,10 @@ long stm_atomic(long delta); int stm_enter_callback_call(void); void stm_leave_callback_call(int); + void stm_abort_info_push(gcptr obj, long fieldoffsets[]); + void stm_abort_info_pop(long count); + char *stm_inspect_abort_info(void); + void stm_abort_and_retry(void); /* extra non-public code */ void printfcolor(char *msg); @@ -619,7 +623,7 @@ assert fine == [True] def abort_and_retry(): - lib.AbortTransaction(lib.ABRT_MANUAL) + lib.stm_abort_and_retry() def classify(p): private_from_protected = (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0 diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py new file mode 100644 --- /dev/null +++ b/c4/test/test_extra.py @@ -0,0 +1,116 @@ +import py, sys, struct +from support import * + + +def setup_function(f): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) + +def teardown_function(_): + lib.stm_finalize() + + +def test_abort_info_stack(): + p = nalloc(HDR) + q = nalloc(HDR) + lib.stm_abort_info_push(p, ffi.cast("long *", 123)) + lib.stm_abort_info_push(q, ffi.cast("long *", 125)) + lib.stm_abort_info_pop(2) + # no real test here + +def test_inspect_abort_info_signed(): + fo1 = ffi.new("long[]", [-2, 1, HDR, -1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc(HDR + WORD) + lib.setlong(p, 0, -421289712) + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("eli-421289712eee") + +def test_inspect_abort_info_nested_unsigned(): + fo1 = ffi.new("long[]", [-2, 2, HDR, 0]) + 
fo2 = ffi.new("long[]", [2, HDR + WORD, -1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc(HDR + WORD) + q = nalloc(HDR + 2 * WORD) + lib.setlong(p, 0, sys.maxint) + lib.setlong(q, 1, -1) + lib.stm_abort_info_push(p, fo1) + lib.stm_abort_info_push(q, fo2) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("eli%dei%deee" % ( + sys.maxint, sys.maxint * 2 + 1)) + +def test_inspect_abort_info_string(): + fo1 = ffi.new("long[]", [3, HDR + WORD, HDR, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc_refs(2) + q = nalloc(HDR + 2 * WORD) + lib.setptr(p, 1, q) + lib.setlong(q, 0, 3) + word = "ABC" + "\xFF" * (WORD - 3) + lib.setlong(q, 1, struct.unpack("l", word)[0]) + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("e3:ABCe") + +def test_inspect_null(): + fo1 = ffi.new("long[]", [3, HDR, HDR + 1, 0]) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + p = nalloc_refs(1) + lib.setptr(p, 0, ffi.NULL) # default + lib.stm_abort_info_push(p, fo1) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("e0:e") + +def test_latest_version(): + fo1 = ffi.new("long[]", [1, HDR, 0]) + p = palloc(HDR + WORD) + lib.rawsetlong(p, 0, -9827892) + # + @perform_transaction + def run(retry_counter): + if retry_counter == 0: + lib.stm_abort_info_push(p, fo1) + lib.setlong(p, 0, 424242) + abort_and_retry() + else: + c = lib.stm_inspect_abort_info() + assert c + assert ffi.string(c).endswith("ei424242ee") + +def test_pointer_equal(): + p = palloc(HDR) + assert lib.stm_pointer_equal(p, p) + assert not lib.stm_pointer_equal(p, ffi.NULL) + assert not lib.stm_pointer_equal(ffi.NULL, p) + assert lib.stm_pointer_equal(ffi.NULL, ffi.NULL) + q = lib.stm_write_barrier(p) + assert q != p + assert lib.stm_pointer_equal(p, q) + assert lib.stm_pointer_equal(q, q) + assert lib.stm_pointer_equal(q, p) diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -5,7 +5,7 @@ gcc -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall -lrt duhton_debug: *.c *.h ../c4/*.c ../c4/*.h - gcc -pthread -g -DDu_DEBUG -D_GC_DEBUG=2 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt + gcc -pthread -g -DDu_DEBUG -D_GC_DEBUGPRINTS=1 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt clean: rm -f duhton duhton_debug From noreply at buildbot.pypy.org Mon Jul 8 10:19:34 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 8 Jul 2013 10:19:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix division Message-ID: <20130708081934.2BCF01C335E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r368:8c466b875875 Date: 2013-07-08 10:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/8c466b875875/ Log: fix division diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -198,7 +198,9 @@ DuObject *du_div(DuObject *cons, DuObject *locals) { - int result = 1; + int result = 0; + int first = 1; + while (cons != Du_None) { _du_read1(cons); DuObject *expr = _DuCons_CAR(cons); @@ -206,7 +208,12 @@ _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); - result /= DuInt_AsInt(obj); + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result /= DuInt_AsInt(obj); + } _du_restore2(next, locals); cons = next; From 
noreply at buildbot.pypy.org Mon Jul 8 10:28:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 8 Jul 2013 10:28:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: add a benchmark walking trees that are global and read-only Message-ID: <20130708082838.EFB111C3363@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r369:5a8d2ec62277 Date: 2013-07-08 10:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/5a8d2ec62277/ Log: add a benchmark walking trees that are global and read-only diff --git a/duhton/demo/trees.duh b/duhton/demo/trees.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/trees.duh @@ -0,0 +1,17 @@ + +(defun create-tree (n) + (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) +) + +(defun walk-tree (tree) + (if (== (len tree) 1) (get tree 0) + (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + ) +) + +(setq tree (create-tree 1024)) +(print (walk-tree tree)) +(setq n 0) +(while (< n 1000) + (transaction walk-tree tree) + (setq n (+ n 1))) From noreply at buildbot.pypy.org Mon Jul 8 10:31:48 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 8 Jul 2013 10:31:48 +0200 (CEST) Subject: [pypy-commit] stmgc default: Another example (that unfortunately segfaults) Message-ID: <20130708083148.8D7A61C3363@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r370:b52283fb3bc9 Date: 2013-07-08 10:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/b52283fb3bc9/ Log: Another example (that unfortunately segfaults) diff --git a/duhton/demo/trees2.duh b/duhton/demo/trees2.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/trees2.duh @@ -0,0 +1,19 @@ + +(defun create-tree (n) + (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) +) + +(defun walk-tree (tree) + (if (== (len tree) 1) (get tree 0) + (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + ) +) + +(defun lookup-tree () + (walk-tree (create-tree 1024)) +) + +(setq n 0) +(while (< n 1000) + (transaction lookup-tree) + (setq n (+ n 1))) From noreply at buildbot.pypy.org Mon Jul 8 10:53:40 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 10:53:40 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: rename _fastjson to _pypyjson Message-ID: <20130708085340.78BC61C0328@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r65259:eaf022dca962 Date: 2013-07-08 10:31 +0200 http://bitbucket.org/pypy/pypy/changeset/eaf022dca962/ Log: rename _fastjson to _pypyjson diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -107,9 +107,9 @@ try: # PyPy speedup, the interface is different than CPython's _json - import _fastjson + import _pypyjson except ImportError: - _fastjson = None + _pypyjson = None from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -328,8 +328,8 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - if _fastjson and not isinstance(s, unicode): - return _fastjson.loads(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) else: return _default_decoder.decode(s) if cls is None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", 
"_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy", "_fastjson"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/module/_fastjson/__init__.py b/pypy/module/_pypyjson/__init__.py rename from pypy/module/_fastjson/__init__.py rename to pypy/module/_pypyjson/__init__.py diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py rename from pypy/module/_fastjson/interp_decoder.py rename to pypy/module/_pypyjson/interp_decoder.py diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py rename from pypy/module/_fastjson/test/test__fastjson.py rename to pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -1,6 +1,6 @@ # -*- encoding: utf-8 -*- import py -from pypy.module._fastjson.interp_decoder import JSONDecoder +from pypy.module._pypyjson.interp_decoder import JSONDecoder def test_skip_whitespace(): s = ' hello ' @@ -14,95 +14,95 @@ class AppTest(object): - spaceconfig = {"objspace.usemodules._fastjson": True} + spaceconfig = {"objspace.usemodules._pypyjson": True} def test_raise_on_unicode(self): - import _fastjson - raises(TypeError, _fastjson.loads, u"42") + import _pypyjson + raises(TypeError, _pypyjson.loads, u"42") def test_decode_constants(self): - import _fastjson - assert _fastjson.loads('null') is None - raises(ValueError, _fastjson.loads, 'nul') - raises(ValueError, _fastjson.loads, 'nu') - raises(ValueError, _fastjson.loads, 'n') - raises(ValueError, _fastjson.loads, 'nuXX') + import _pypyjson + assert _pypyjson.loads('null') is None + raises(ValueError, _pypyjson.loads, 'nul') + raises(ValueError, _pypyjson.loads, 'nu') + raises(ValueError, _pypyjson.loads, 'n') + raises(ValueError, _pypyjson.loads, 'nuXX') # - assert _fastjson.loads('true') is True - raises(ValueError, _fastjson.loads, 'tru') - raises(ValueError, _fastjson.loads, 'tr') - raises(ValueError, _fastjson.loads, 't') - raises(ValueError, _fastjson.loads, 'trXX') + assert _pypyjson.loads('true') is True + raises(ValueError, _pypyjson.loads, 'tru') + raises(ValueError, _pypyjson.loads, 'tr') + raises(ValueError, _pypyjson.loads, 't') + raises(ValueError, _pypyjson.loads, 'trXX') # - assert _fastjson.loads('false') is False - raises(ValueError, _fastjson.loads, 'fals') - raises(ValueError, _fastjson.loads, 'fal') - raises(ValueError, _fastjson.loads, 'fa') - raises(ValueError, _fastjson.loads, 'f') - raises(ValueError, _fastjson.loads, 'falXX') + assert _pypyjson.loads('false') is False + raises(ValueError, _pypyjson.loads, 'fals') + raises(ValueError, _pypyjson.loads, 'fal') + raises(ValueError, _pypyjson.loads, 'fa') + raises(ValueError, _pypyjson.loads, 'f') + raises(ValueError, _pypyjson.loads, 'falXX') def test_decode_string(self): - import _fastjson - res = _fastjson.loads('"hello"') + import _pypyjson + res = _pypyjson.loads('"hello"') assert res == u'hello' assert type(res) is unicode def test_decode_string_utf8(self): - import _fastjson + import _pypyjson s = u'àèìòù' - res = _fastjson.loads('"%s"' % s.encode('utf-8')) + res = _pypyjson.loads('"%s"' % s.encode('utf-8')) assert res == s def test_skip_whitespace(self): - import _fastjson + import _pypyjson s = ' "hello" ' - assert _fastjson.loads(s) == u'hello' + assert _pypyjson.loads(s) == u'hello' s = ' "hello" 
extra' - raises(ValueError, "_fastjson.loads(s)") + raises(ValueError, "_pypyjson.loads(s)") def test_unterminated_string(self): - import _fastjson + import _pypyjson s = '"hello' # missing the trailing " - raises(ValueError, "_fastjson.loads(s)") + raises(ValueError, "_pypyjson.loads(s)") def test_escape_sequence(self): - import _fastjson - assert _fastjson.loads(r'"\\"') == u'\\' - assert _fastjson.loads(r'"\""') == u'"' - assert _fastjson.loads(r'"\/"') == u'/' - assert _fastjson.loads(r'"\b"') == u'\b' - assert _fastjson.loads(r'"\f"') == u'\f' - assert _fastjson.loads(r'"\n"') == u'\n' - assert _fastjson.loads(r'"\r"') == u'\r' - assert _fastjson.loads(r'"\t"') == u'\t' + import _pypyjson + assert _pypyjson.loads(r'"\\"') == u'\\' + assert _pypyjson.loads(r'"\""') == u'"' + assert _pypyjson.loads(r'"\/"') == u'/' + assert _pypyjson.loads(r'"\b"') == u'\b' + assert _pypyjson.loads(r'"\f"') == u'\f' + assert _pypyjson.loads(r'"\n"') == u'\n' + assert _pypyjson.loads(r'"\r"') == u'\r' + assert _pypyjson.loads(r'"\t"') == u'\t' def test_escape_sequence_in_the_middle(self): - import _fastjson + import _pypyjson s = r'"hello\nworld"' - assert _fastjson.loads(s) == "hello\nworld" + assert _pypyjson.loads(s) == "hello\nworld" def test_unterminated_string_after_escape_sequence(self): - import _fastjson + import _pypyjson s = r'"hello\nworld' # missing the trailing " - raises(ValueError, "_fastjson.loads(s)") + raises(ValueError, "_pypyjson.loads(s)") def test_escape_sequence_unicode(self): - import _fastjson + import _pypyjson s = r'"\u1234"' - assert _fastjson.loads(s) == u'\u1234' + assert _pypyjson.loads(s) == u'\u1234' def test_invalid_utf_8(self): - import _fastjson + import _pypyjson s = '"\xe0"' # this is an invalid UTF8 sequence inside a string - raises(UnicodeDecodeError, "_fastjson.loads(s)") + raises(UnicodeDecodeError, "_pypyjson.loads(s)") def test_decode_numeric(self): import sys - import _fastjson + import _pypyjson def check(s, val): - res = _fastjson.loads(s) + res = _pypyjson.loads(s) assert type(res) is type(val) assert res == val # @@ -136,14 +136,14 @@ def test_nan(self): import math - import _fastjson - res = _fastjson.loads('NaN') + import _pypyjson + res = _pypyjson.loads('NaN') assert math.isnan(res) def test_decode_numeric_invalid(self): - import _fastjson + import _pypyjson def error(s): - raises(ValueError, _fastjson.loads, s) + raises(ValueError, _pypyjson.loads, s) # error(' 42 abc') error('.123') @@ -155,34 +155,34 @@ error('0123') # numbers can't start with 0 def test_decode_object(self): - import _fastjson - assert _fastjson.loads('{}') == {} - assert _fastjson.loads('{ }') == {} + import _pypyjson + assert _pypyjson.loads('{}') == {} + assert _pypyjson.loads('{ }') == {} # s = '{"hello": "world", "aaa": "bbb"}' - assert _fastjson.loads(s) == {'hello': 'world', + assert _pypyjson.loads(s) == {'hello': 'world', 'aaa': 'bbb'} - raises(ValueError, _fastjson.loads, '{"key"') - raises(ValueError, _fastjson.loads, '{"key": 42') + raises(ValueError, _pypyjson.loads, '{"key"') + raises(ValueError, _pypyjson.loads, '{"key": 42') def test_decode_object_nonstring_key(self): - import _fastjson - raises(ValueError, "_fastjson.loads('{42: 43}')") + import _pypyjson + raises(ValueError, "_pypyjson.loads('{42: 43}')") def test_decode_array(self): - import _fastjson - assert _fastjson.loads('[]') == [] - assert _fastjson.loads('[ ]') == [] - assert _fastjson.loads('[1]') == [1] - assert _fastjson.loads('[1, 2]') == [1, 2] - raises(ValueError, "_fastjson.loads('[1: 2]')") - 
raises(ValueError, "_fastjson.loads('[1, 2')") - raises(ValueError, """_fastjson.loads('["extra comma",]')""") + import _pypyjson + assert _pypyjson.loads('[]') == [] + assert _pypyjson.loads('[ ]') == [] + assert _pypyjson.loads('[1]') == [1] + assert _pypyjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_pypyjson.loads('[1: 2]')") + raises(ValueError, "_pypyjson.loads('[1, 2')") + raises(ValueError, """_pypyjson.loads('["extra comma",]')""") def test_unicode_surrogate_pair(self): - import _fastjson + import _pypyjson expected = u'z\U0001d120x' - res = _fastjson.loads('"z\\ud834\\udd20x"') + res = _pypyjson.loads('"z\\ud834\\udd20x"') assert res == expected From noreply at buildbot.pypy.org Mon Jul 8 10:53:42 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 10:53:42 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add a target to benchmark _pypyjson without a full translation Message-ID: <20130708085342.087711C0328@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r65260:a7e6ac9b38e9 Date: 2013-07-08 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/a7e6ac9b38e9/ Log: add a target to benchmark _pypyjson without a full translation diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): + return W_Unicode(x) + + def wrapint(self, x): + return W_Int(x) + + def wrapfloat(self, x): + return W_Float(x) + + def wrap(self, x): + if isinstance(x, int): + return W_Int(x) + elif isinstance(x, float): + return W_Float(x) + ## elif isinstance(x, str): + ## assert False + else: + return W_Unicode(unicode(x)) + 
wrap._annspecialcase_ = "specialize:argtype(1)" + + +fakespace = FakeSpace() + +def myloads(msg): + return loads(fakespace, W_String(msg)) + + +def bench(title, N, fn, arg): + a = time.clock() + for i in range(N): + res = fn(arg) + b = time.clock() + print title, (b-a) / N * 1000 + +def entry_point(argv): + if len(argv) != 3: + print 'Usage: %s FILE n' % argv[0] + return 1 + filename = argv[1] + N = int(argv[2]) + f = open_file_as_stream(filename) + msg = f.readall() + + try: + bench('loads ', N, myloads, msg) + except OperationError, e: + print 'Error', e._compute_value(fakespace) + + return 0 + +# _____ Define and setup target ___ + +def target(*args): + return entry_point, None + +if __name__ == '__main__': + entry_point(sys.argv) From noreply at buildbot.pypy.org Mon Jul 8 11:17:36 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 8 Jul 2013 11:17:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix Message-ID: <20130708091736.9FD551C0605@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r371:1183d4c7518c Date: 2013-07-08 11:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/1183d4c7518c/ Log: fix diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -271,7 +271,9 @@ DuObject *du_list(DuObject *cons, DuObject *locals) { + _du_save2(cons, locals); DuObject *list = DuList_New(); + _du_restore2(cons, locals); while (cons != Du_None) { _du_read1(cons); DuObject *expr = _DuCons_CAR(cons); @@ -299,7 +301,11 @@ else _du_getargs1("container", cons, locals, &obj); - return DuContainer_New(obj); + _du_save2(cons, locals); + DuObject *container = DuContainer_New(obj); + _du_restore2(cons, locals); + + return container; } DuObject *du_get(DuObject *cons, DuObject *locals) From noreply at buildbot.pypy.org Mon Jul 8 11:20:16 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 11:20:16 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: hg merge default Message-ID: <20130708092016.D487B1C0605@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r65261:344ca3721bd5 Date: 2013-07-08 11:17 +0200 http://bitbucket.org/pypy/pypy/changeset/344ca3721bd5/ Log: hg merge default diff too long, truncating to 2000 out of 4574 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. 
-""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -46,16 +46,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." 
- return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +65,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -78,11 +78,15 @@ # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,14 +133,15 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit, e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -8,6 +8,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,32 +24,13 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group: + __metaclass__ = _structseq.structseqtype - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... + gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -71,8 +53,8 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(res.contents.gr_name, res.contents.gr_passwd, - res.contents.gr_gid, mem) + return struct_group((res.contents.gr_name, res.contents.gr_passwd, + res.contents.gr_gid, mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. 
+import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -1,3 +1,4 @@ +# this cffi version was rewritten based on the # ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides an interface to the Unix syslog library routines. @@ -9,34 +10,84 @@ if sys.platform == 'win32': raise ImportError("No syslog on Windows") -# load the platform-specific cache made by running syslog.ctc.py -from ctypes_config_cache._syslog_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes import c_int, c_char_p +from cffi import FFI try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +ffi = FFI() -# Real prototype is: -# void syslog(int priority, const char *format, ...); -# But we also need format ("%s") and one format argument (message) -_syslog = libc.syslog -_syslog.argtypes = (c_int, c_char_p, c_char_p) -_syslog.restype = None +ffi.cdef(""" +/* mandatory constants */ +#define LOG_EMERG ... +#define LOG_ALERT ... +#define LOG_CRIT ... +#define LOG_ERR ... +#define LOG_WARNING ... +#define LOG_NOTICE ... +#define LOG_INFO ... +#define LOG_DEBUG ... -_openlog = libc.openlog -_openlog.argtypes = (c_char_p, c_int, c_int) -_openlog.restype = None +#define LOG_PID ... +#define LOG_CONS ... +#define LOG_NDELAY ... -_closelog = libc.closelog -_closelog.argtypes = None -_closelog.restype = None +#define LOG_KERN ... +#define LOG_USER ... +#define LOG_MAIL ... +#define LOG_DAEMON ... +#define LOG_AUTH ... +#define LOG_LPR ... +#define LOG_LOCAL0 ... +#define LOG_LOCAL1 ... +#define LOG_LOCAL2 ... +#define LOG_LOCAL3 ... +#define LOG_LOCAL4 ... +#define LOG_LOCAL5 ... +#define LOG_LOCAL6 ... +#define LOG_LOCAL7 ... -_setlogmask = libc.setlogmask -_setlogmask.argtypes = (c_int,) -_setlogmask.restype = c_int +/* optional constants, gets defined to -919919 if missing */ +#define LOG_NOWAIT ... +#define LOG_PERROR ... + +/* aliased constants, gets defined as some other constant if missing */ +#define LOG_SYSLOG ... +#define LOG_CRON ... +#define LOG_UUCP ... +#define LOG_NEWS ... + +/* functions */ +void openlog(const char *ident, int option, int facility); +void syslog(int priority, const char *format, const char *string); +// NB. 
the signature of syslog() is specialized to the only case we use +void closelog(void); +int setlogmask(int mask); +""") + +lib = ffi.verify(""" +#include <syslog.h> + +#ifndef LOG_NOWAIT +#define LOG_NOWAIT -919919 +#endif +#ifndef LOG_PERROR +#define LOG_PERROR -919919 +#endif +#ifndef LOG_SYSLOG +#define LOG_SYSLOG LOG_DAEMON +#endif +#ifndef LOG_CRON +#define LOG_CRON LOG_DAEMON +#endif +#ifndef LOG_UUCP +#define LOG_UUCP LOG_MAIL +#endif +#ifndef LOG_NEWS +#define LOG_NEWS LOG_MAIL +#endif +""") + _S_log_open = False _S_ident_o = None @@ -52,12 +103,17 @@ return None @builtinify -def openlog(ident=None, logoption=0, facility=LOG_USER): +def openlog(ident=None, logoption=0, facility=lib.LOG_USER): global _S_ident_o, _S_log_open if ident is None: ident = _get_argv() - _S_ident_o = c_char_p(ident) # keepalive - _openlog(_S_ident_o, logoption, facility) + if ident is None: + _S_ident_o = ffi.NULL + elif isinstance(ident, str): + _S_ident_o = ffi.new("char[]", ident) # keepalive + else: + raise TypeError("'ident' must be a string or None") + lib.openlog(_S_ident_o, logoption, facility) _S_log_open = True @builtinify @@ -69,19 +125,19 @@ # if log is not opened, open it now if not _S_log_open: openlog() - _syslog(priority, "%s", message) + lib.syslog(priority, "%s", message) @builtinify def closelog(): global _S_log_open, S_ident_o if _S_log_open: - _closelog() + lib.closelog() _S_log_open = False _S_ident_o = None @builtinify def setlogmask(mask): - return _setlogmask(mask) + return lib.setlogmask(mask) @builtinify def LOG_MASK(pri): @@ -91,8 +147,15 @@ def LOG_UPTO(pri): return (1 << (pri + 1)) - 1 -__all__ = ALL_CONSTANTS + ( +__all__ = [] + +for name in sorted(lib.__dict__): + if name.startswith('LOG_'): + value = getattr(lib, name) + if value != -919919: + globals()[name] = value + __all__.append(name) + +__all__ = tuple(__all__) + ( 'openlog', 'syslog', 'closelog', 'setlogmask', 'LOG_MASK', 'LOG_UPTO') - -del ALL_CONSTANTS diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -144,7 +144,7 @@ requires=module_dependencies.get(modname, []), suggests=module_suggests.get(modname, []), negation=modname not in essential_modules, - validator=get_module_validator(modname)) + ) #validator=get_module_validator(modname)) for modname in all_modules]), BoolOption("allworkingmodules", "use as many working modules as possible", diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@
+Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. 
They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.1.rst @@ -0,0 +1,78 @@ +====================== +What's new in PyPy 2.1 +====================== + +.. this is a revision shortly after release-2.0 +.. startrev: a13c07067613 + +.. branch: ndarray-ptp +put and array.put + +.. branch: numpy-pickle +Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support + +.. branch: remove-tuple-smm +Remove multi-methods on tuple + +.. branch: remove-iter-smm +Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects + +.. branch: argsort-segfault +Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) + +.. branch: dtype-isnative +.. branch: ndarray-round + +.. branch: faster-str-of-bigint +Improve performance of str(long). + +.. branch: ndarray-view +Add view to ndarray and zeroD arrays, not on dtype scalars yet + +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + +.. branch: identity-set +Faster sets for objects + +.. 
branch: inline-identityhash +Inline the fast path of id() and hash() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) @@ -57,3 +60,19 @@ Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) .. branch: dtype-isnative +.. branch: ndarray-round + +.. branch: faster-str-of-bigint +Improve performance of str(long). + +.. branch: ndarray-view +Add view to ndarray and zeroD arrays, not on dtype scalars yet + +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + +.. branch: identity-set +Faster sets for objects + +.. branch: inline-identityhash +Inline the fast path of id() and hash() diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -29,12 +29,12 @@ _application_traceback = None def __init__(self, w_type, w_value, tb=None): - assert w_type is not None self.setup(w_type) self._w_value = w_value self._application_traceback = tb def setup(self, w_type): + assert w_type is not None self.w_type = w_type if not we_are_translated(): self.debug_excs = [] @@ -347,7 +347,6 @@ self.xstrings = strings for i, _, attr in entries: setattr(self, attr, args[i]) - assert w_type is not None def _compute_value(self, space): lst = [None] * (len(formats) + len(formats) + 1) @@ -369,6 +368,18 @@ _fmtcache2[formats] = OpErrFmt return OpErrFmt, strings +class OpErrFmtNoArgs(OperationError): + + def __init__(self, w_type, value): + self.setup(w_type) + self._value = value + + def get_w_value(self, space): + w_value = self._w_value + if w_value is None: + self._w_value = w_value = space.wrap(self._value) + return w_value + def get_operationerr_class(valuefmt): try: result = _fmtcache[valuefmt] @@ -389,6 +400,8 @@ %T - The result of space.type(w_arg).getname(space) """ + if not len(args): + return OpErrFmtNoArgs(w_type, valuefmt) OpErrFmt, strings = get_operationerr_class(valuefmt) return OpErrFmt(w_type, strings, *args) operationerrfmt._annspecialcase_ = 'specialize:arg(1)' diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -33,6 +33,14 @@ operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ +def test_operationerrfmt_noargs(space): + operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") + operr.normalize_exception(space) + val = operr.get_w_value(space) + assert space.isinstance_w(val, space.w_AttributeError) + w_repr = space.repr(val) + assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" + def test_operationerrfmt_T(space): operr = operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -41,8 +41,8 @@ def save_field(self, field_builder): field = field_builder.build() if self.numeric_field: - from pypy.objspace.std.strutil import ParseStringError - from pypy.objspace.std.strutil import string_to_float + from rpython.rlib.rstring import ParseStringError + from rpython.rlib.rfloat 
import string_to_float self.numeric_field = False try: ff = string_to_float(field) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -101,6 +101,9 @@ raise OperationError( space.w_ValueError, space.wrap(message)) + def check_closed_w(self, space): + self._check_closed(space) + def closed_get_w(self, space): return space.newbool(self.__IOBase_closed) @@ -277,6 +280,7 @@ _checkReadable = interp2app(check_readable_w), _checkWritable = interp2app(check_writable_w), _checkSeekable = interp2app(check_seekable_w), + _checkClosed = interp2app(W_IOBase.check_closed_w), closed = GetSetProperty(W_IOBase.closed_get_w), __dict__ = GetSetProperty(descr_get_dict, descr_set_dict, cls=W_IOBase), __weakref__ = make_weakref_descr(W_IOBase), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -22,7 +22,9 @@ import io with io.BufferedIOBase() as f: assert not f.closed + f._checkClosed() assert f.closed + raises(ValueError, f._checkClosed) def test_iter(self): import io diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -71,6 +71,7 @@ 'complex_': 'interp_boxes.W_Complex128Box', 'complex128': 'interp_boxes.W_Complex128Box', 'complex64': 'interp_boxes.W_Complex64Box', + 'cfloat': 'interp_boxes.W_Complex64Box', } if ENABLED_LONG_DOUBLE: long_double_dtypes = [ @@ -183,6 +184,7 @@ appleveldefs = {} interpleveldefs = { 'choose': 'interp_arrayops.choose', + 'put': 'interp_arrayops.put', 'repeat': 'interp_arrayops.repeat', } submodules = { diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -75,6 +75,12 @@ else: return None + def get_view(self, orig_array, dtype, new_shape): + strides, backstrides = support.calc_strides(new_shape, dtype, + self.order) + return SliceArray(self.start, strides, backstrides, new_shape, + self, orig_array, dtype=dtype) + def get_real(self, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -13,6 +13,9 @@ def next(self): self.called_once = True + def next_skip_x(self, n): + self.called_once = True + def getitem(self): return self.v.get_scalar_value() @@ -63,6 +66,11 @@ def transpose(self, _): return self + def get_view(self, orig_array, dtype, new_shape): + scalar = Scalar(dtype) + scalar.value = self.value.convert_to(dtype) + return scalar + def get_real(self, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -51,6 +51,7 @@ w_IndexError = W_TypeObject("IndexError") w_OverflowError = W_TypeObject("OverflowError") w_NotImplementedError = W_TypeObject("NotImplementedError") + w_AttributeError = W_TypeObject("AttributeError") w_None = None w_bool = W_TypeObject("bool") diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- 
a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -65,7 +65,7 @@ [ 3., 4., -1.], [-1., -1., -1.]]) - + NOTE: support for not passing x and y is unsupported """ if space.is_none(w_y): @@ -122,10 +122,10 @@ for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("record type mismatch")) elif dtype.is_record_type() or a_dt.is_record_type(): - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) @@ -192,6 +192,61 @@ loop.choose(space, arr, choices, shape, dtype, out, MODES[mode]) return out + + at unwrap_spec(mode=str) +def put(space, w_arr, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy import constants + from pypy.module.micronumpy.support import int_w + + arr = convert_to_array(space, w_arr) + + if mode not in constants.MODES: + raise OperationError(space.w_ValueError, + space.wrap("mode %s not known" % (mode,))) + if not w_indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not w_values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + + dtype = arr.get_dtype() + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = int_w(space, idx) + + if index < 0 or index >= arr.get_size(): + if constants.MODES[mode] == constants.MODE_RAISE: + raise OperationError(space.w_ValueError, space.wrap( + "invalid entry in choice array")) + elif constants.MODES[mode] == constants.MODE_WRAP: + index = index % arr.get_size() + else: + assert constants.MODES[mode] == constants.MODE_CLIP + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) + + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -235,6 +235,21 @@ w_values = space.newtuple([self]) return convert_to_array(space, w_values) + @unwrap_spec(decimals=int) + def descr_round(self, space, decimals=0): + v = self.convert_to(self.get_dtype(space)) + return self.get_dtype(space).itemtype.round(v, decimals) + + def descr_view(self, space, w_dtype): + from pypy.module.micronumpy.interp_dtype import W_Dtype + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() != self.get_dtype(space).get_size(): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + raise OperationError(space.w_NotImplementedError, space.wrap( + "view not implelemnted yet")) + class W_BoolBox(W_GenericBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") @@ -501,6 +516,8 @@ any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), + round = interp2app(W_GenericBox.descr_round), + view = 
interp2app(W_GenericBox.descr_view), ) W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -550,17 +550,36 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "ptp (peak to peak) not implemented yet")) - def descr_put(self, space, w_indices, w_values, w_mode='raise'): - raise OperationError(space.w_NotImplementedError, space.wrap( - "put not implemented yet")) + @unwrap_spec(mode=str) + def descr_put(self, space, w_indices, w_values, mode='raise'): + from pypy.module.micronumpy.interp_arrayops import put + put(space, self, w_indices, w_values, mode) def descr_resize(self, space, w_new_shape, w_refcheck=True): raise OperationError(space.w_NotImplementedError, space.wrap( "resize not implemented yet")) - def descr_round(self, space, w_decimals=0, w_out=None): - raise OperationError(space.w_NotImplementedError, space.wrap( - "round not implemented yet")) + @unwrap_spec(decimals=int) + def descr_round(self, space, decimals=0, w_out=None): + if space.is_none(w_out): + if self.get_dtype().is_bool_type(): + #numpy promotes bool.round() to float16. Go figure. + w_out = W_NDimArray.from_shape(self.get_shape(), + interp_dtype.get_dtype_cache(space).w_float16dtype) + else: + w_out = None + elif not isinstance(w_out, W_NDimArray): + raise OperationError(space.w_TypeError, space.wrap( + "return arrays must be of ArrayType")) + out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), + w_out) + if out.get_dtype().is_bool_type() and self.get_dtype().is_bool_type(): + calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + else: + calc_dtype = out.get_dtype() + + loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) + return out def descr_searchsorted(self, space, w_v, w_side='left'): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -600,8 +619,40 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - raise OperationError(space.w_NotImplementedError, space.wrap( - "view not implemented yet")) + if w_type is not None: + raise OperationError(space.w_NotImplementedError, space.wrap( + "view(... 
type=) not implemented yet")) + if w_dtype: + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) + else: + dtype = self.get_dtype() + old_itemsize = self.get_dtype().get_size() + new_itemsize = dtype.get_size() + impl = self.implementation + new_shape = self.get_shape()[:] + dims = len(new_shape) + if dims == 0: + # Cannot resize scalars + if old_itemsize != new_itemsize: + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array shape")) + else: + if dims == 1 or impl.get_strides()[0] < impl.get_strides()[-1]: + # Column-major, resize first dimension + if new_shape[0] * old_itemsize % new_itemsize != 0: + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + new_shape[0] = new_shape[0] * old_itemsize / new_itemsize + else: + # Row-major, resize last dimension + if new_shape[-1] * old_itemsize % new_itemsize != 0: + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize + return W_NDimArray(impl.get_view(self, dtype, new_shape)) + # --------------------- operations ---------------------------- @@ -939,6 +990,7 @@ prod = interp2app(W_NDimArray.descr_prod), max = interp2app(W_NDimArray.descr_max), min = interp2app(W_NDimArray.descr_min), + put = interp2app(W_NDimArray.descr_put), argmax = interp2app(W_NDimArray.descr_argmax), argmin = interp2app(W_NDimArray.descr_argmin), all = interp2app(W_NDimArray.descr_all), @@ -975,8 +1027,10 @@ byteswap = interp2app(W_NDimArray.descr_byteswap), choose = interp2app(W_NDimArray.descr_choose), clip = interp2app(W_NDimArray.descr_clip), + round = interp2app(W_NDimArray.descr_round), data = GetSetProperty(W_NDimArray.descr_get_data), diagonal = interp2app(W_NDimArray.descr_diagonal), + view = interp2app(W_NDimArray.descr_view), ctypes = GetSetProperty(W_NDimArray.descr_get_ctypes), # XXX unimplemented __array_interface__ = GetSetProperty(W_NDimArray.descr_array_iface), @@ -1009,15 +1063,21 @@ order) dtype = interp_dtype.decode_w_dtype(space, w_dtype) - if isinstance(w_object, W_NDimArray): - if (not space.is_none(w_dtype) and - w_object.get_dtype() is not dtype): - raise OperationError(space.w_NotImplementedError, space.wrap( - "copying over different dtypes unsupported")) + if isinstance(w_object, W_NDimArray) and \ + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + shape = w_object.get_shape() if copy: - return w_object.descr_copy(space) - return w_object - + w_ret = w_object.descr_copy(space) + else: + if ndmin<= len(shape): + return w_object + new_impl = w_object.implementation.set_shape(space, w_object, shape) + w_ret = W_NDimArray(new_impl) + if ndmin > len(shape): + shape = [1] * (ndmin - len(shape)) + shape + w_ret.implementation = w_ret.implementation.set_shape(space, + w_ret, shape) + return w_ret shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None or ( dtype.is_str_or_unicode() and dtype.itemtype.get_size() < 1): diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -2,7 +2,7 @@ from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import interp_dtype, loop -from pypy.objspace.std.strutil import strip_spaces +from rpython.rlib.rstring 
import strip_spaces from rpython.rlib.rarithmetic import maxint from pypy.module.micronumpy.base import W_NDimArray diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -37,7 +37,7 @@ we can go faster. All the calculations happen in next() -next_skip_x() tries to do the iteration for a number of steps at once, +next_skip_x(steps) tries to do the iteration for a number of steps at once, but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy.support import product from rpython.rlib import jit # structures to describe slicing @@ -225,7 +226,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = False + self._done = self.shapelen == 0 or product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size @@ -284,7 +285,7 @@ self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] self.first_line = True self.indices = [0] * len(shape) - self._done = False + self._done = array.get_size() == 0 self.offset = array.start self.dim = dim self.array = array diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -173,7 +173,7 @@ iter = x_iter shapelen = len(shape) while not iter.done(): - where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, + where_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, arr_dtype=arr_dtype) w_cond = arr_iter.getitem() if arr_dtype.itemtype.bool(w_cond): @@ -188,7 +188,7 @@ return out axis_reduce__driver = jit.JitDriver(name='numpy_axis_reduce', - greens=['shapelen', + greens=['shapelen', 'func', 'dtype', 'identity'], reds='auto') @@ -228,7 +228,7 @@ arg_driver = jit.JitDriver(name='numpy_' + op_name, greens = ['shapelen', 'dtype'], reds = 'auto') - + def argmin_argmax(arr): result = 0 idx = 1 @@ -265,7 +265,7 @@ result.shape == [3, 5, 2, 4] broadcast shape should be [3, 5, 2, 7, 4] result should skip dims 3 which is len(result_shape) - 1 - (note that if right is 1d, result should + (note that if right is 1d, result should skip len(result_shape)) left should skip 2, 4 which is a.ndims-1 + range(right.ndims) except where it==(right.ndims-2) @@ -283,9 +283,9 @@ righti = right.create_dot_iter(broadcast_shape, right_skip) while not outi.done(): dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(dtype) - rval = righti.getitem().convert_to(dtype) - outval = outi.getitem().convert_to(dtype) + lval = lefti.getitem().convert_to(dtype) + rval = righti.getitem().convert_to(dtype) + outval = outi.getitem().convert_to(dtype) v = dtype.itemtype.mul(lval, rval) value = dtype.itemtype.add(v, outval).convert_to(dtype) outi.setitem(value) @@ -355,7 +355,7 @@ setitem_filter_driver.jit_merge_point(shapelen=shapelen, index_dtype=index_dtype, arr_dtype=arr_dtype, - ) + ) if index_iter.getitem_bool(): arr_iter.setitem(value_iter.getitem()) value_iter.next() @@ -572,6 +572,21 @@ out_iter.next() min_iter.next() +round_driver = jit.JitDriver(greens = ['shapelen', 'dtype'], + reds = 'auto') + +def round(space, arr, dtype, shape, decimals, out): + arr_iter = arr.create_iter(shape) + shapelen = len(shape) + out_iter = out.create_iter(shape) + while not 
arr_iter.done(): + round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(dtype), + decimals) + out_iter.setitem(w_v) + arr_iter.next() + out_iter.next() + diagonal_simple_driver = jit.JitDriver(greens = ['axis1', 'axis2'], reds = 'auto') @@ -613,4 +628,4 @@ out_iter.setitem(arr.getitem_index(space, indexes)) iter.next() out_iter.next() - + diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -132,3 +132,26 @@ x = array([0, 0, 0], dtype='i2') r = array([2, 1, 0]).choose([a, b, c], out=x) assert r.dtype == 'i2' + + def test_put_basic(self): + from numpypy import arange, array + a = arange(5) + a.put([0, 2], [-44, -55]) + assert (a == array([-44, 1, -55, 3, 4])).all() + a = arange(5) + a.put([3, 4], 9) + assert (a == array([0, 1, 2, 9, 9])).all() + a = arange(5) + a.put(1, [7, 8]) + assert (a == array([0, 7, 2, 3, 4])).all() + + def test_put_modes(self): + from numpypy import array, arange + a = arange(5) + a.put(22, -5, mode='clip') + assert (a == array([0, 1, 2, 3, -5])).all() + a = arange(5) + a.put(22, -5, mode='wrap') + assert (a == array([0, 1, -5, 3, 4])).all() + raises(ValueError, "arange(5).put(22, -5, mode='raise')") + raises(ValueError, "arange(5).put(22, -5, mode='wrongmode')") diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -585,6 +585,7 @@ import numpypy as numpy assert numpy.complex_ is numpy.complex128 + assert numpy.cfloat is numpy.complex64 assert numpy.complex64.__mro__ == (numpy.complex64, numpy.complexfloating, numpy.inexact, numpy.number, numpy.generic, object) diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,4 +1,5 @@ from pypy.module.micronumpy.iter import MultiDimViewIterator +from pypy.module.micronumpy.arrayimpl.scalar import ScalarIterator class MockArray(object): size = 1 @@ -8,7 +9,7 @@ #Let's get started, simple iteration in C order with #contiguous layout => strides[-1] is 1 start = 0 - shape = [3, 5] + shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] @@ -47,7 +48,7 @@ #iteration in C order with #contiguous layout => strides[-1] is 1 #skip less than the shape start = 0 - shape = [3, 5] + shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] @@ -89,3 +90,9 @@ assert i.indexes == [0,1] assert i.offset == 3 assert i.done() + + def test_scalar_iter(self): + i = ScalarIterator(MockArray) + i.next() + i.next_skip_x(3) + assert i.done() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -276,6 +276,32 @@ arr = array([1], ndmin=3) assert arr.shape == (1, 1, 1) + def test_array_copy(self): + from numpypy import array + a = array(range(12)).reshape(3,4) + b = array(a, ndmin=4) + assert b.shape == (1, 1, 3, 4) + b = array(a, copy=False) + b[0, 0] = 100 + assert a[0, 0] == 100 + b = array(a, copy=True, ndmin=2) + b[0, 0] = 0 + assert a[0, 0] == 100 + b = array(a, 
dtype=float) + assert (b[0] == [100, 1, 2, 3]).all() + assert b.dtype.kind == 'f' + b = array(a, copy=False, ndmin=4) + b[0,0,0,0] = 0 + assert a[0, 0] == 0 + a = array([[[]]]) + # Simulate tiling an empty array, really tests repeat, reshape + # b = tile(a, (3, 2, 5)) + reps = (3, 4, 5) + c = array(a, copy=False, subok=True, ndmin=len(reps)) + d = c.reshape(3, 4, 0) + e = d.repeat(3, 0) + assert e.shape == (9, 4, 0) + def test_type(self): from numpypy import array ar = array(range(5)) @@ -325,13 +351,16 @@ assert a[1] == 1.0 def test_ones(self): - from numpypy import ones + from numpypy import ones, dtype a = ones(3) assert len(a) == 3 assert a[0] == 1 raises(IndexError, "a[3]") a[2] = 4 assert a[2] == 4 + b = ones(3, complex) + assert b[0] == 1+0j + assert b.dtype is dtype(complex) def test_copy(self): from numpypy import arange, array @@ -1390,6 +1419,45 @@ assert a[3].imag == -10 assert a[2].imag == -5 + def test_view(self): + from numpypy import array, int8, int16, dtype + x = array((1, 2), dtype=int8) + assert x.shape == (2,) + y = x.view(dtype=int16) + assert x.shape == (2,) + assert y[0] == 513 + assert y.dtype == dtype('int16') + y[0] = 670 + assert x[0] == -98 + assert x[1] == 2 + f = array([1000, -1234], dtype='i4') + nnp = self.non_native_prefix + d = f.view(dtype=nnp + 'i4') + assert (d == [-402456576, 788267007]).all() + x = array(range(15), dtype='i2').reshape(3,5) + exc = raises(ValueError, x.view, dtype='i4') + assert exc.value[0] == "new type not compatible with array." + assert x.view('int8').shape == (3, 10) + x = array(range(15), dtype='int16').reshape(3,5).T + assert x.view('int8').shape == (10, 3) + + def test_ndarray_view_empty(self): + from numpypy import array, int8, int16, dtype + x = array([], dtype=[('a', int8), ('b', int8)]) + y = x.view(dtype=int16) + + def test_scalar_view(self): + from numpypy import int64, array + a = array(0, dtype='int32') + b = a.view(dtype='float32') + assert b.shape == () + assert b == 0 + s = int64(12) + exc = raises(ValueError, s.view, 'int8') + assert exc.value[0] == "new type not compatible with array." + skip('not implemented yet') + assert s.view('double') < 7e-323 + def test_tolist_scalar(self): from numpypy import int32, bool_ x = int32(23) @@ -2176,6 +2244,10 @@ d.fill(100) assert d == 100 + e = array(10, dtype=complex) + e.fill(1.5-3j) + assert e == 1.5-3j + def test_array_indexing_bool(self): from numpypy import arange a = arange(10) @@ -2498,6 +2570,9 @@ a = array(range(100) + range(100) + range(100)) b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + a = array([[[]]]).reshape(3,4,0) + b = a.argsort() + assert b.size == 0 def test_argsort_random(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -21,3 +21,19 @@ a = zeros(3) assert loads(dumps(sum(a))) == sum(a) + + def test_round(self): + from numpypy import int32, float64, complex128, bool + i = int32(1337) + f = float64(13.37) + c = complex128(13 + 37.j) + b = bool(0) + assert i.round(decimals=-2) == 1300 + assert i.round(decimals=1) == 1337 + assert c.round() == c + assert f.round() == 13. + assert f.round(decimals=-1) == 10. 
+ assert f.round(decimals=1) == 13.4 + exc = raises(AttributeError, 'b.round()') + assert exc.value[0] == "'bool' object has no attribute 'round'" + diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -286,7 +286,7 @@ skip('sign of nan is non-determinant') assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - [False, True, True]).all() + [False, True, True]).all() def test_reciprocal(self): from numpypy import array, reciprocal, complex64, complex128 @@ -334,6 +334,23 @@ assert all([math.copysign(1, f(abs(float("nan")))) == 1 for f in floor, ceil, trunc]) assert all([math.copysign(1, f(-abs(float("nan")))) == -1 for f in floor, ceil, trunc]) + def test_round(self): + from numpypy import array, dtype + ninf, inf = float("-inf"), float("inf") + a = array([ninf, -1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5, inf]) + assert ([ninf, -1.0, -2.0, -1.0, 0.0, 1.0, 1.0, 0.0, inf] == a.round()).all() + i = array([-1000, -100, -1, 0, 1, 111, 1111, 11111], dtype=int) + assert (i == i.round()).all() + assert (i.round(decimals=4) == i).all() + assert (i.round(decimals=-4) == [0, 0, 0, 0, 0, 0, 0, 10000]).all() + b = array([True, False], dtype=bool) + bround = b.round() + assert (bround == [1., 0.]).all() + assert bround.dtype is dtype('float16') + c = array([10.5+11.5j, -15.2-100.3456j, 0.2343+11.123456j]) + assert (c.round(0) == [10.+12.j, -15-100j, 0+11j]).all() + + def test_copysign(self): from numpypy import array, copysign @@ -364,7 +381,7 @@ assert b[i] == res def test_exp2(self): - import math + import math from numpypy import array, exp2 inf = float('inf') ninf = -float('inf') @@ -759,8 +776,8 @@ complex(inf, inf), complex(inf, ninf), complex(0, inf), complex(ninf, ninf), complex(nan, 0), complex(0, nan), complex(nan, nan)] - assert (isfinite(a) == [True, True, False, False, False, - False, False, False, False, False]).all() + assert (isfinite(a) == [True, True, False, False, False, + False, False, False, False, False]).all() def test_logical_ops(self): from numpypy import logical_and, logical_or, logical_xor, logical_not @@ -864,7 +881,7 @@ #numpy returns (a.real*b.real + a.imag*b.imag) / abs(b)**2 expect = [3., -23., 1.] for i in range(len(a)): - assert b[i] == expect[i] + assert b[i] == expect[i] b = floor_divide(a[0], 0.) assert math.isnan(b.real) assert b.imag == 0. 
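[Editorial note: the integer round() added in the types.py diff below truncates toward zero when decimals is negative; RPython has no ** operator and its integer division floors, which is why that code builds the factor in a loop and negates twice for negative values. A minimal pure-Python sketch of the same behaviour follows; the helper name is made up and this is not the RPython code itself.]

    def round_int_toward_zero(raw, decimals):
        # sketch of the negative-decimals branch of the integer round()
        # added to pypy/module/micronumpy/types.py below (illustrative only)
        if decimals >= 0:
            return raw                   # integer values are unchanged
        factor = 1
        for _ in range(-decimals):       # no ** in RPython, so build 10**(-decimals) by hand
            factor *= 10
        if raw < 0:
            # floor division pulls negative numbers toward -inf;
            # negate twice to truncate toward zero instead
            return -((-raw) // factor * factor)
        return raw // factor * factor

    assert round_int_toward_zero(1337, -2) == 1300     # matches i.round(decimals=-2) in the test above
    assert round_int_toward_zero(-1337, -2) == -1300   # plain floor division would give -1400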
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -37,7 +37,7 @@ return self.box( func( self, - self.for_computation(raw) + self.for_computation(raw), ) ) return dispatcher @@ -521,6 +521,23 @@ return v return 0 + @specialize.argtype(1) + def round(self, v, decimals=0): + raw = self.for_computation(self.unbox(v)) + if decimals < 0: + # No ** in rpython + factor = 1 + for i in xrange(-decimals): + factor *=10 + #int does floor division, we want toward zero + if raw < 0: + ans = - (-raw / factor * factor) + else: + ans = raw / factor * factor + else: + ans = raw + return self.box(ans) + @raw_unary_op def signbit(self, v): return v < 0 @@ -798,6 +815,16 @@ def ceil(self, v): return math.ceil(v) + @specialize.argtype(1) + def round(self, v, decimals=0): + raw = self.for_computation(self.unbox(v)) + if rfloat.isinf(raw): + return v + elif rfloat.isnan(raw): + return v + ans = rfloat.round_double(raw, decimals, half_even=True) + return self.box(ans) + @simple_unary_op def trunc(self, v): if v < 0: @@ -1073,6 +1100,13 @@ op = '+' if imag >= 0 else '' return ''.join(['(', real_str, op, imag_str, ')']) + def fill(self, storage, width, box, start, stop, offset): + real, imag = self.unbox(box) + for i in xrange(start, stop, width): + raw_storage_setitem(storage, i+offset, real) + raw_storage_setitem(storage, + i+offset+rffi.sizeof(self.T), imag) + @staticmethod def for_computation(v): return float(v[0]), float(v[1]) @@ -1354,6 +1388,15 @@ except ZeroDivisionError: return rfloat.NAN, rfloat.NAN + @specialize.argtype(1) + def round(self, v, decimals=0): + ans = list(self.for_computation(self.unbox(v))) + if isfinite(ans[0]): + ans[0] = rfloat.round_double(ans[0], decimals, half_even=True) + if isfinite(ans[1]): + ans[1] = rfloat.round_double(ans[1], decimals, half_even=True) + return self.box_complex(ans[0], ans[1]) + # No floor, ceil, trunc in numpy for complex #@simple_unary_op #def floor(self, v): diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,19 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: for op in chunk.operations: @@ -162,15 +163,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): 
opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -201,10 +201,27 @@ def main(n): i = 0 while i < n: - s = set([1,2,3]) + s = set([1, 2, 3]) i += 1 log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + + def test_specialised_tuple(self): + def main(n): + import pypyjit + + f = lambda: None + tup = (n, n) + while n > 0: + tup[0] # ID: getitem + pypyjit.residual_call(f) + n -= 1 + + log = self.run(main, [1000]) + assert log.result == main(1000) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,39 +80,11 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p28 = call(ConstClass(strip_spaces), p25, descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i29 = strlen(p28) - i30 = int_is_true(i29) - guard_true(i30, descr=...) - i32 = int_sub(i29, 1) - i33 = strgetitem(p28, i32) - i35 = int_eq(i33, 108) - guard_false(i35, descr=...) - i37 = int_eq(i33, 76) - guard_false(i37, descr=...) - i39 = strgetitem(p28, 0) - i41 = int_eq(i39, 45) - guard_false(i41, descr=...) - i43 = int_eq(i39, 43) - guard_false(i43, descr=...) - i43 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr42), descr=) - guard_false(i43, descr=...) - i46 = call(ConstClass(ll_startswith__rpy_stringPtr_rpy_stringPtr), p28, ConstPtr(ptr45), descr=) - guard_false(i46, descr=...) - p51 = new_with_vtable(...) - setfield_gc(p51, _, descr=...) # 7 setfields, but the order is dict-order-dependent - setfield_gc(p51, _, descr=...) - setfield_gc(p51, _, descr=...) - setfield_gc(p51, _, descr=...) - setfield_gc(p51, _, descr=...) - setfield_gc(p51, _, descr=...) - setfield_gc(p51, _, descr=...) - p55 = call(ConstClass(parse_digit_string), p51, descr=) + i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) - i57 = call(ConstClass(rbigint.toint), p55, descr=) - guard_no_exception(descr=...) - i58 = int_add_ovf(i6, i57) + i95 = int_add_ovf(i6, i94) guard_no_overflow(descr=...) --TICK-- jump(..., descr=...) 
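[Editorial note: the expected trace just above, from test_string.py, documents the new fast path for parsing a short string as an integer: a single residual call to ConstClass(fromstr) followed by rbigint.toint replaces the old strip_spaces / parse_digit_string sequence. App-level loops of roughly the following shape produce that kind of trace; this is an illustrative sketch, not the exact benchmark used in the test file.]

    def main(n):
        s = "0123456789abcdef"
        total = 0
        i = 0
        while i < n:
            # strgetitem + newstr(1) + fromstr(..., 16) + rbigint.toint in the trace
            total += int(s[i % len(s)], 16)
            i += 1
        return total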
diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -32,14 +32,6 @@ return d -def test_syslog(): - try: - import lib_pypy.syslog - except ImportError: - py.test.skip('no syslog on this platform') - d = run('syslog.ctc.py', '_syslog_cache.py') - assert 'LOG_NOTICE' in d - def test_resource(): try: import lib_pypy.resource diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -319,3 +319,25 @@ g = G(lambda: 42) x = g.switch() assert x == 42 + + def test_kwargs_to_f(self): + import greenlet + seen = [] + def f(*args, **kwds): + seen.append([args, kwds]) + g = greenlet.greenlet(f) + g.switch(1, 2, x=3, y=4) + assert seen == [[(1, 2), {'x': 3, 'y': 4}]] + + def test_kwargs_to_switch(self): + import greenlet + main = greenlet.getcurrent() + assert main.switch() == () + assert main.switch(5) == 5 + assert main.switch(5, 6) == (5, 6) + # + assert main.switch(x=5) == {'x': 5} + assert main.switch(x=5, y=6) == {'x': 5, 'y': 6} + assert main.switch(3, x=5) == ((3,), {'x': 5}) + assert main.switch(3, x=5, y=6) == ((3,), {'x': 5, 'y': 6}) + assert main.switch(2, 3, x=6) == ((2, 3), {'x': 6}) diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -5,6 +5,22 @@ except ImportError: py.test.skip("No grp module on this platform") +def test_basic(): + g = grp.getgrnam("root") + assert g.gr_gid == 0 + assert g.gr_mem == ['root'] or g.gr_mem == [] + assert g.gr_name == 'root' + assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + def test_extra(): py.test.raises(TypeError, grp.getgrnam, False) py.test.raises(TypeError, grp.getgrnam, None) + +def test_struct_group(): + g = grp.struct_group((10, 20, 30, 40)) + assert len(g) == 4 + assert list(g) == [10, 20, 30, 40] + assert g.gr_name == 10 + assert g.gr_passwd == 20 + assert g.gr_gid == 30 + assert g.gr_mem == 40 diff --git a/pypy/module/test_lib_pypy/test_syslog.py b/pypy/module/test_lib_pypy/test_syslog.py --- a/pypy/module/test_lib_pypy/test_syslog.py +++ b/pypy/module/test_lib_pypy/test_syslog.py @@ -1,15 +1,15 @@ from __future__ import absolute_import -import py +import sys, py try: from lib_pypy import syslog except ImportError: py.test.skip('no syslog on this platform') +except AssertionError: + if '__pypy__' in sys.builtin_module_names: + raise + py.test.skip('AssertionError during import (wrong cffi version?)') # XXX very minimal test -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('syslog.ctc.py') - - def test_syslog(): assert hasattr(syslog, 'LOG_ALERT') diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -1,10 +1,11 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.strutil import string_to_float, ParseStringError from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from 
pypy.objspace.std.stdtypedef import StdObjSpaceMultiMethod +from rpython.rlib.rfloat import string_to_float +from rpython.rlib.rstring import ParseStringError # ERRORCODES diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -225,8 +225,8 @@ space.raise_key_error(w_key) def descr_reversed(self, space): - raise OperationError(space.w_TypeError, space.wrap( - 'argument to reversed() must be a sequence')) + raise operationerrfmt(space.w_TypeError, + 'argument to reversed() must be a sequence') def descr_copy(self, space): """D.copy() -> a shallow copy of D""" diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -8,10 +8,9 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from pypy.objspace.std.strutil import ParseStringError -from pypy.objspace.std.strutil import string_to_float from pypy.objspace.std.model import W_Object from rpython.rlib.rbigint import rbigint +from rpython.rlib.rstring import ParseStringError float_as_integer_ratio = SMM("as_integer_ratio", 1) @@ -41,7 +40,7 @@ space.isinstance_w(w_value, space.w_bytearray)): strvalue = space.bufferstr_w(w_value) try: - value = string_to_float(strvalue) + value = rfloat.string_to_float(strvalue) except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) @@ -49,7 +48,7 @@ from unicodeobject import unicode_to_decimal_w strvalue = unicode_to_decimal_w(space, w_value) try: - value = string_to_float(strvalue) + value = rfloat.string_to_float(strvalue) except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -5,13 +5,12 @@ from pypy.interpreter.buffer import Buffer from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from pypy.objspace.std.strutil import (string_to_int, string_to_bigint, - ParseStringError, - ParseStringOverflowError) from pypy.objspace.std.model import W_Object -from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.rarithmetic import r_uint, string_to_int from rpython.rlib.objectmodel import instantiate from rpython.rlib.rbigint import rbigint +from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError +from rpython.rlib import jit # ____________________________________________________________ @@ -63,6 +62,7 @@ # ____________________________________________________________ + at jit.elidable def string_to_int_or_long(space, string, base=10): w_longval = None value = 0 @@ -75,15 +75,14 @@ w_longval = retry_to_w_long(space, e.parser) return value, w_longval -def retry_to_w_long(space, parser, base=0): +def retry_to_w_long(space, parser): parser.rewind() try: - bigint = string_to_bigint(None, base=base, parser=parser) + bigint = rbigint._from_numberstring_parser(parser) except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) - from pypy.objspace.std.longobject import newlong - return newlong(space, bigint) + return space.newlong_from_rbigint(bigint) @unwrap_spec(w_x = WrappedDefault(0)) def descr__new__(space, w_inttype, w_x, w_base=None): diff --git 
a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1222,7 +1222,8 @@ def _safe_find(self, w_list, obj, start, stop): l = self.unerase(w_list.lstorage) for i in range(start, min(stop, len(l))): - if l[i] == obj: + val = l[i] + if val == obj: return i raise ValueError @@ -1542,18 +1543,6 @@ if reverse: l.reverse() - def _safe_find(self, w_list, obj, start, stop): - from rpython.rlib.rfloat import isnan - if not isnan(obj): - return AbstractUnwrappedStrategy._safe_find(self, w_list, obj, From noreply at buildbot.pypy.org Mon Jul 8 11:20:18 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 11:20:18 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: a branch where to improve the performance of str2charp and friends by using memcpy Message-ID: <20130708092018.356471C0605@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65262:c42db549a2fc Date: 2013-07-08 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/c42db549a2fc/ Log: a branch where to improve the performance of str2charp and friends by using memcpy From noreply at buildbot.pypy.org Mon Jul 8 11:30:24 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 11:30:24 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add doc for _pypyjson Message-ID: <20130708093024.9F40E1C13AA@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r65263:440ba629bad5 Date: 2013-07-08 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/440ba629bad5/ Log: add doc for _pypyjson diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module From noreply at buildbot.pypy.org Mon Jul 8 11:36:29 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 8 Jul 2013 11:36:29 +0200 (CEST) Subject: [pypy-commit] stmgc default: backout 840ac0a, demo/trees2.duh seems to crash without tracing Message-ID: <20130708093629.96C621C13AA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r372:2b2561c18ea9 Date: 2013-07-08 11:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/2b2561c18ea9/ Log: backout 840ac0a, demo/trees2.duh seems to crash without tracing diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -225,8 +225,8 @@ id_copy->h_tid |= GCFLAG_VISITED; /* XXX: may not always need tracing? */ - //if (!(id_copy->h_tid & GCFLAG_STUB)) - // gcptrlist_insert(&objects_to_trace, id_copy); + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); } else { /* prebuilt originals won't get collected anyway From noreply at buildbot.pypy.org Mon Jul 8 13:27:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 8 Jul 2013 13:27:16 +0200 (CEST) Subject: [pypy-commit] stmgc default: repeat/do write barrier after minor_collection. I think the problem was that minor_collect clears old_objects_to_trace and the object needs to be reregistered there if it is modified again. Message-ID: <20130708112716.11DD21C00B9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r373:470bcb38a12e Date: 2013-07-08 13:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/470bcb38a12e/ Log: repeat/do write barrier after minor_collection. 
I think the problem was that minor_collect clears old_objects_to_trace and the object needs to be reregistered there if it is modified again. diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_write1(ob); + _du_read1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,6 +85,8 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); + _du_write1(ob); + for (i=0; iob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; From noreply at buildbot.pypy.org Mon Jul 8 14:37:39 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 8 Jul 2013 14:37:39 +0200 (CEST) Subject: [pypy-commit] stmgc default: A test that fails. Maybe should fail, waiting for arigato's OK Message-ID: <20130708123739.0A11F1C0113@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r374:ef8442a75174 Date: 2013-07-08 14:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/ef8442a75174/ Log: A test that fails. Maybe should fail, waiting for arigato's OK diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -204,6 +204,36 @@ assert p4 == p2 assert list_of_read_objects() == [p2] +def test_write_barrier_after_minor_collect(): + # maybe should fail. not sure. + p = oalloc_refs(1) + pw = lib.stm_write_barrier(p) + + lib.stm_push_root(pw) + minor_collect() + r = nalloc(HDR) + pw = lib.stm_pop_root() + + assert pw.h_tid & GCFLAG_OLD + rawsetptr(pw, 0, r) + + # pw not in old_objects_to_trace. A + # repeated write_barrier before + # rawsetptr() would fix that + + lib.stm_push_root(r) + minor_collect() + r2 = lib.stm_pop_root() + check_nursery_free(r) + + pr = lib.stm_read_barrier(p) + assert r != r2 + # these will fail because pw/pr was + # not traced in the last minor_collect, + # because they were not registered in + # old_objects_to_trace. + assert getptr(pr, 0) != r + assert getptr(pr, 0) == r2 def test_id_young_to_old(): # move out of nursery with shadow original From noreply at buildbot.pypy.org Mon Jul 8 15:57:18 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 15:57:18 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: use the new copy_string_to_raw to implement str2charp. Microbenchmarks show a speedup of 31x Message-ID: <20130708135718.128721C0512@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65265:771e378d592d Date: 2013-07-08 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/771e378d592d/ Log: use the new copy_string_to_raw to implement str2charp. 
Microbenchmarks show a speedup of 31x diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -677,7 +677,8 @@ def make_string_mappings(strtype): if strtype is str: - from rpython.rtyper.lltypesystem.rstr import STR as STRTYPE + from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, + copy_string_to_raw) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -685,7 +686,9 @@ lastchar = '\x00' builder_class = StringBuilder else: - from rpython.rtyper.lltypesystem.rstr import UNICODE as STRTYPE + from rpython.rtyper.lltypesystem.rstr import ( + UNICODE as STRTYPE, + copy_unicode_to_raw as copy_string_to_raw) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -702,11 +705,9 @@ else: array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw', track_allocation=False) i = len(s) + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, 0, i) array[i] = lastchar - i -= 1 - while i >= 0: - array[i] = s[i] - i -= 1 return array str2charp._annenforceargs_ = [strtype, bool] diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -102,7 +102,7 @@ return copy_string_to_raw, copy_string_contents copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') -copy_unicode_do_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, +copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() From noreply at buildbot.pypy.org Mon Jul 8 15:57:16 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 15:57:16 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: 1) simplify the implementation of _new_copy_contents_fun, because we can assume to have always the same SRC and DST type; 2) add a new function to copy the content of an rpython string to a raw buffer, using llmemory.raw_memcopy, which is much faster than doing the copy char-by-char Message-ID: <20130708135716.D52031C0328@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65264:db736b323df1 Date: 2013-07-08 15:43 +0200 http://bitbucket.org/pypy/pypy/changeset/db736b323df1/ Log: 1) simplify the implementation of _new_copy_contents_fun, because we can assume to have always the same SRC and DST type; 2) add a new function to copy the content of an rpython string to a raw buffer, using llmemory.raw_memcopy, which is much faster than doing the copy char-by-char diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -49,16 +49,18 @@ def emptyunicodefun(): return emptyunicode -def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name): - def _str_ofs_src(item): - return (llmemory.offsetof(SRC_TP, 'chars') + - llmemory.itemoffsetof(SRC_TP.chars, 0) + +def _new_copy_contents_fun(STR_TP, CHAR_TP, name): + def _str_ofs(item): + return (llmemory.offsetof(STR_TP, 'chars') + + llmemory.itemoffsetof(STR_TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - def _str_ofs_dst(item): - return (llmemory.offsetof(DST_TP, 'chars') + - llmemory.itemoffsetof(DST_TP.chars, 0) + - 
llmemory.sizeof(CHAR_TP) * item) + @signature(types.any(), types.int(), returns=types.any()) + def _get_raw_buf(src, ofs): + assert typeOf(src).TO == STR_TP + assert ofs >= 0 + return llmemory.cast_ptr_to_adr(src) + _str_ofs(ofs) + _get_raw_buf._always_inline_ = True @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) @@ -71,22 +73,37 @@ # because it might move the strings. The keepalive_until_here() # are obscurely essential to make sure that the strings stay alive # longer than the raw_memcopy(). - assert typeOf(src).TO == SRC_TP - assert typeOf(dst).TO == DST_TP - assert srcstart >= 0 - assert dststart >= 0 assert length >= 0 - src = llmemory.cast_ptr_to_adr(src) + _str_ofs_src(srcstart) - dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs_dst(dststart) + src = _get_raw_buf(src, srcstart) + dst = _get_raw_buf(dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True - return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) + copy_string_contents = func_with_new_name(copy_string_contents, + 'copy_%s_contents' % name) -copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, - 'unicode') + def copy_string_to_raw(src, ptrdst, srcstart, length): + """ + Copies 'length' characters from the 'src' string to the 'ptrdst' + buffer, starting at position 'srcstart'. + 'ptrdst' must be a non-gc Array of Char. + """ + # xxx Warning: same note as above apply: don't do this at home + assert length >= 0 + src = _get_raw_buf(src, srcstart) + adr = llmemory.cast_ptr_to_adr(ptrdst) + dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) + llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) + keepalive_until_here(src) + copy_string_to_raw._always_inline_ = True + copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) + + return copy_string_to_raw, copy_string_contents + +copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') +copy_unicode_do_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, + UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() CONST_UNICODE_CACHE = WeakValueDictionary() diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1118,6 +1118,30 @@ res = self.interpret(f, [5]) assert res == 0 + def test_copy_string_to_raw(self): + from rpython.rtyper.lltypesystem import lltype, llmemory + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + + def f(buf, n): + s = 'abc' * n + ll_s = llstr(s) + copy_string_to_raw(ll_s, buf, 0, n*3) + + TP = lltype.Array(lltype.Char) + array = lltype.malloc(TP, 12, flavor='raw') + f(array, 4) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') + + array = lltype.malloc(TP, 12, flavor='raw') + self.interpret(f, [array, 4]) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') + + + + class TestOOtype(BaseTestRstr, OORtypeMixin): pass From noreply at buildbot.pypy.org Mon Jul 8 16:04:08 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 16:04:08 +0200 (CEST) Subject: [pypy-commit] pypy 
improve-str2charp: use copy_string_to_raw to implement also get_nonmovingbuffer: this should improve our I/O writing speed Message-ID: <20130708140408.7A62F1C0512@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65266:fe711f557c8f Date: 2013-07-08 16:01 +0200 http://bitbucket.org/pypy/pypy/changeset/fe711f557c8f/ Log: use copy_string_to_raw to implement also get_nonmovingbuffer: this should improve our I/O writing speed diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -740,14 +740,14 @@ string is already nonmovable. Must be followed by a free_nonmovingbuffer call. """ + lldata = llstrtype(data) if rgc.can_move(data): count = len(data) buf = lltype.malloc(TYPEP.TO, count, flavor='raw') - for i in range(count): - buf[i] = data[i] + copy_string_to_raw(lldata, buf, 0, count) return buf else: - data_start = cast_ptr_to_adr(llstrtype(data)) + \ + data_start = cast_ptr_to_adr(lldata) + \ offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0) return cast(TYPEP, data_start) get_nonmovingbuffer._annenforceargs_ = [strtype] From noreply at buildbot.pypy.org Mon Jul 8 17:06:07 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 17:06:07 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: close about-to-be-merged branch Message-ID: <20130708150607.914521C315D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r65267:408a51b5d094 Date: 2013-07-08 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/408a51b5d094/ Log: close about-to-be-merged branch diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -76,18 +76,22 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing - ## from pypy.tool import gdb_pypy - ## reload(gdb_pypy) - ## gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache - ## self.__class__ = gdb_pypy.RPyType + from pypy.tool import gdb_pypy + reload(gdb_pypy) + gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache + self.__class__ = gdb_pypy.RPyType print self.do_invoke(arg, from_tty) def do_invoke(self, arg, from_tty): - obj = self.gdb.parse_and_eval(arg) - hdr = lookup(obj, '_gcheader') - tid = hdr['h_tid'] - offset = tid & 0xFFFFFFFF # 64bit only - offset = int(offset) # convert from gdb.Value to python int + try: + offset = int(arg) + except ValueError: + obj = self.gdb.parse_and_eval(arg) + hdr = lookup(obj, '_gcheader') + tid = hdr['h_tid'] + offset = tid & 0xFFFFFFFF # 64bit only + offset = int(offset) # convert from gdb.Value to python int + typeids = self.get_typeids() if offset in typeids: return typeids[offset] From noreply at buildbot.pypy.org Mon Jul 8 17:06:09 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 17:06:09 +0200 (CEST) Subject: [pypy-commit] pypy default: merge the fastjson branch, which includes the new _pypyjson module written in RPython and provides a much faster json decoding Message-ID: <20130708150609.4920D1C315D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65268:1a19c9b44738 Date: 2013-07-08 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/1a19c9b44738/ Log: merge the fastjson branch, which includes the new _pypyjson module written in RPython and provides a much faster json decoding diff --git a/lib-python/2.7/json/__init__.py 
b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. 
+ + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. + """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return 
self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) + diff = rffi.cast(rffi.LONG, self.end_ptr[0]) - rffi.cast(rffi.LONG, start) + self.pos = i + diff + return self.space.wrap(floatval) + + def decode_int_slow(self, i): + start = i + if self.ll_chars[i] == '-': + i += 1 + while self.ll_chars[i].isdigit(): + i += 1 + s = self.getslice(start, i) + self.pos = i + return self.space.call_function(self.space.w_int, self.space.wrap(s)) + + def parse_integer(self, i): + "Parse a decimal number with an optional minus sign" + sign = 1 + # parse the sign + if self.ll_chars[i] == '-': + sign = -1 + i += 1 + elif self.ll_chars[i] == '+': + i += 1 + # + if self.ll_chars[i] == '0': + i += 1 + return i, False, 0 + + intval = 0 + start = i + while True: + ch = self.ll_chars[i] + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + i += 1 + else: + break + count = i - start + if count == 0: + self._raise("Expected digit at char %d", i) + # if the number has more digits than OVF_DIGITS, it might have + # overflowed + ovf_maybe = (count >= OVF_DIGITS) + return i, ovf_maybe, sign * intval + parse_integer._always_inline_ = True + + def decode_array(self, i): + w_list = self.space.newlist([]) + start = i + count = 0 + i = self.skip_whitespace(start) + if self.ll_chars[i] == ']': + self.pos = i+1 + return w_list + # + while True: + w_item = self.decode_any(i) + i = self.pos + self.space.call_method(w_list, 'append', w_item) + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + i += 1 + if ch == ']': + self.pos = i + return w_list + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.pos) + + def decode_object(self, i): + start = i + w_dict = self.space.newdict() + # + i = self.skip_whitespace(i) + if self.ll_chars[i] == '}': + self.pos = i+1 + return w_dict + # + while True: + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any(i) + if self.last_type != TYPE_STRING: + self._raise("Key name must be string for object starting at char %d", start) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + if ch != ':': + self._raise("No ':' found at char %d", i) + i += 1 + i = self.skip_whitespace(i) + # + w_value = self.decode_any(i) + self.space.setitem(w_dict, w_name, w_value) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + i += 1 + if ch == '}': + self.pos = i + return w_dict + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.pos) + + + def decode_string(self, i): + start = i + bits = 0 + while True: + # this loop is a fast path for strings which do not contain escape + # characters + ch = self.ll_chars[i] + i += 1 + bits |= ord(ch) + if ch == 
'"': + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + else: + # ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + content_so_far = self.getslice(start, i-1) + self.pos = i-1 + return self.decode_string_escaped(start, content_so_far) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + + + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + else: + builder.append_multiple_char(ch, 1) # we should implement append_char + + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 + put = builder.append_multiple_char + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) + elif ch == 'u': + return self.decode_escape_sequence_unicode(i, builder) + else: + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i + + def decode_escape_sequence_unicode(self, i, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
+ start = i + i += 4 + hexdigits = self.getslice(start, i) + try: + val = int(hexdigits, 16) + if val & 0xfc00 == 0xd800: + # surrogate pair + val = self.decode_surrogate_pair(i, val) + i += 6 + except ValueError: + self._raise("Invalid \uXXXX escape (char %d)", i-1) + return # help the annotator to know that we'll never go beyond + # this point + # + uchr = unichr(val) + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + return i + + def decode_surrogate_pair(self, i, highsurr): + if self.ll_chars[i] != '\\' or self.ll_chars[i+1] != 'u': + self._raise("Unpaired high surrogate at char %d", i) + i += 2 + hexdigits = self.getslice(i, i+4) + lowsurr = int(hexdigits, 16) # the possible ValueError is caugth by the caller + return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) + +def loads(space, w_s): + if space.isinstance_w(w_s, space.w_unicode): + raise OperationError(space.w_TypeError, + space.wrap("Expected utf8-encoded str, got unicode")) + s = space.str_w(w_s) + decoder = JSONDecoder(space, s) + try: + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if i < len(s): + start = i + end = len(s) - 1 + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res + finally: + decoder.close() diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): + return W_Unicode(x) + + def wrapint(self, x): + return W_Int(x) + + def wrapfloat(self, x): + return W_Float(x) + + def wrap(self, x): + if isinstance(x, int): + return W_Int(x) + elif isinstance(x, float): + return W_Float(x) 
+ ## elif isinstance(x, str): + ## assert False + else: + return W_Unicode(unicode(x)) + wrap._annspecialcase_ = "specialize:argtype(1)" + + +fakespace = FakeSpace() + +def myloads(msg): + return loads(fakespace, W_String(msg)) + + +def bench(title, N, fn, arg): + a = time.clock() + for i in range(N): + res = fn(arg) + b = time.clock() + print title, (b-a) / N * 1000 + +def entry_point(argv): + if len(argv) != 3: + print 'Usage: %s FILE n' % argv[0] + return 1 + filename = argv[1] + N = int(argv[2]) + f = open_file_as_stream(filename) + msg = f.readall() + + try: + bench('loads ', N, myloads, msg) + except OperationError, e: + print 'Error', e._compute_value(fakespace) + + return 0 + +# _____ Define and setup target ___ + +def target(*args): + return entry_point, None + +if __name__ == '__main__': + entry_point(sys.argv) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -0,0 +1,188 @@ +# -*- encoding: utf-8 -*- +import py +from pypy.module._pypyjson.interp_decoder import JSONDecoder + +def test_skip_whitespace(): + s = ' hello ' + dec = JSONDecoder('fake space', s) + assert dec.pos == 0 + assert dec.skip_whitespace(0) == 3 + assert dec.skip_whitespace(3) == 3 + assert dec.skip_whitespace(8) == len(s) + dec.close() + + + +class AppTest(object): + spaceconfig = {"objspace.usemodules._pypyjson": True} + + def test_raise_on_unicode(self): + import _pypyjson + raises(TypeError, _pypyjson.loads, u"42") + + + def test_decode_constants(self): + import _pypyjson + assert _pypyjson.loads('null') is None + raises(ValueError, _pypyjson.loads, 'nul') + raises(ValueError, _pypyjson.loads, 'nu') + raises(ValueError, _pypyjson.loads, 'n') + raises(ValueError, _pypyjson.loads, 'nuXX') + # + assert _pypyjson.loads('true') is True + raises(ValueError, _pypyjson.loads, 'tru') + raises(ValueError, _pypyjson.loads, 'tr') + raises(ValueError, _pypyjson.loads, 't') + raises(ValueError, _pypyjson.loads, 'trXX') + # + assert _pypyjson.loads('false') is False + raises(ValueError, _pypyjson.loads, 'fals') + raises(ValueError, _pypyjson.loads, 'fal') + raises(ValueError, _pypyjson.loads, 'fa') + raises(ValueError, _pypyjson.loads, 'f') + raises(ValueError, _pypyjson.loads, 'falXX') + + + def test_decode_string(self): + import _pypyjson + res = _pypyjson.loads('"hello"') + assert res == u'hello' + assert type(res) is unicode + + def test_decode_string_utf8(self): + import _pypyjson + s = u'àèìòù' + res = _pypyjson.loads('"%s"' % s.encode('utf-8')) + assert res == s + + def test_skip_whitespace(self): + import _pypyjson + s = ' "hello" ' + assert _pypyjson.loads(s) == u'hello' + s = ' "hello" extra' + raises(ValueError, "_pypyjson.loads(s)") + + def test_unterminated_string(self): + import _pypyjson + s = '"hello' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence(self): + import _pypyjson + assert _pypyjson.loads(r'"\\"') == u'\\' + assert _pypyjson.loads(r'"\""') == u'"' + assert _pypyjson.loads(r'"\/"') == u'/' + assert _pypyjson.loads(r'"\b"') == u'\b' + assert _pypyjson.loads(r'"\f"') == u'\f' + assert _pypyjson.loads(r'"\n"') == u'\n' + assert _pypyjson.loads(r'"\r"') == u'\r' + assert _pypyjson.loads(r'"\t"') == u'\t' + + def test_escape_sequence_in_the_middle(self): + import _pypyjson + s = r'"hello\nworld"' + assert _pypyjson.loads(s) == "hello\nworld" + + def test_unterminated_string_after_escape_sequence(self): + import 
_pypyjson + s = r'"hello\nworld' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence_unicode(self): + import _pypyjson + s = r'"\u1234"' + assert _pypyjson.loads(s) == u'\u1234' + + def test_invalid_utf_8(self): + import _pypyjson + s = '"\xe0"' # this is an invalid UTF8 sequence inside a string + raises(UnicodeDecodeError, "_pypyjson.loads(s)") + + def test_decode_numeric(self): + import sys + import _pypyjson + def check(s, val): + res = _pypyjson.loads(s) + assert type(res) is type(val) + assert res == val + # + check('42', 42) + check('-42', -42) + check('42.123', 42.123) + check('42E0', 42.0) + check('42E3', 42000.0) + check('42E-1', 4.2) + check('42E+1', 420.0) + check('42.123E3', 42123.0) + check('0', 0) + check('-0', 0) + check('0.123', 0.123) + check('0E3', 0.0) + check('5E0001', 50.0) + check(str(1 << 32), 1 << 32) + check(str(1 << 64), 1 << 64) + # + x = str(sys.maxint+1) + '.123' + check(x, float(x)) + x = str(sys.maxint+1) + 'E1' + check(x, float(x)) + x = str(sys.maxint+1) + 'E-1' + check(x, float(x)) + # + check('1E400', float('inf')) + ## # these are non-standard but supported by CPython json + check('Infinity', float('inf')) + check('-Infinity', float('-inf')) + + def test_nan(self): + import math + import _pypyjson + res = _pypyjson.loads('NaN') + assert math.isnan(res) + + def test_decode_numeric_invalid(self): + import _pypyjson + def error(s): + raises(ValueError, _pypyjson.loads, s) + # + error(' 42 abc') + error('.123') + error('+123') + error('12.') + error('12.-3') + error('12E') + error('12E-') + error('0123') # numbers can't start with 0 + + def test_decode_object(self): + import _pypyjson + assert _pypyjson.loads('{}') == {} + assert _pypyjson.loads('{ }') == {} + # + s = '{"hello": "world", "aaa": "bbb"}' + assert _pypyjson.loads(s) == {'hello': 'world', + 'aaa': 'bbb'} + raises(ValueError, _pypyjson.loads, '{"key"') + raises(ValueError, _pypyjson.loads, '{"key": 42') + + def test_decode_object_nonstring_key(self): + import _pypyjson + raises(ValueError, "_pypyjson.loads('{42: 43}')") + + def test_decode_array(self): + import _pypyjson + assert _pypyjson.loads('[]') == [] + assert _pypyjson.loads('[ ]') == [] + assert _pypyjson.loads('[1]') == [1] + assert _pypyjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_pypyjson.loads('[1: 2]')") + raises(ValueError, "_pypyjson.loads('[1, 2')") + raises(ValueError, """_pypyjson.loads('["extra comma",]')""") + + def test_unicode_surrogate_pair(self): + import _pypyjson + expected = u'z\U0001d120x' + res = _pypyjson.loads('"z\\ud834\\udd20x"') + assert res == expected + + diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -76,18 +76,22 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing - ## from pypy.tool import gdb_pypy - ## reload(gdb_pypy) - ## gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache - ## self.__class__ = gdb_pypy.RPyType + from pypy.tool import gdb_pypy + reload(gdb_pypy) + gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache + self.__class__ = gdb_pypy.RPyType print self.do_invoke(arg, from_tty) def do_invoke(self, arg, from_tty): - obj = self.gdb.parse_and_eval(arg) - hdr = lookup(obj, '_gcheader') - tid = hdr['h_tid'] - offset = tid & 0xFFFFFFFF # 64bit only - offset = int(offset) # convert from gdb.Value to python int + try: + offset = int(arg) + except ValueError: + obj = 
self.gdb.parse_and_eval(arg) + hdr = lookup(obj, '_gcheader') + tid = hdr['h_tid'] + offset = tid & 0xFFFFFFFF # 64bit only + offset = int(offset) # convert from gdb.Value to python int + typeids = self.get_typeids() if offset in typeids: return typeids[offset] From noreply at buildbot.pypy.org Mon Jul 8 17:08:37 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 17:08:37 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: hg merge default Message-ID: <20130708150837.717421C315D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65269:4d172e0da296 Date: 2013-07-08 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/4d172e0da296/ Log: hg merge default diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import 
unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. + + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. + """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + 
self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) + diff = rffi.cast(rffi.LONG, self.end_ptr[0]) - rffi.cast(rffi.LONG, start) + self.pos = i + diff + return self.space.wrap(floatval) + + def decode_int_slow(self, i): + start = i + if self.ll_chars[i] == '-': + i += 1 + while self.ll_chars[i].isdigit(): + i += 1 + s = self.getslice(start, i) + self.pos = i + return self.space.call_function(self.space.w_int, self.space.wrap(s)) + + def parse_integer(self, i): + "Parse a decimal number with an optional minus sign" + sign = 1 + # parse the sign + if self.ll_chars[i] == '-': + sign = -1 + i += 1 + elif self.ll_chars[i] == '+': + i += 1 + # + if self.ll_chars[i] == '0': + i += 1 + return i, False, 0 + + intval = 0 + start = i + while True: + ch = self.ll_chars[i] + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + i += 1 + else: + break + count = i - start + if count == 0: + self._raise("Expected digit at char %d", i) + # if the number has more digits than OVF_DIGITS, it might have + # overflowed + ovf_maybe = (count >= OVF_DIGITS) + return i, ovf_maybe, sign * intval + parse_integer._always_inline_ = True + + def decode_array(self, i): + w_list = self.space.newlist([]) + start = i + count = 0 + i = self.skip_whitespace(start) + if self.ll_chars[i] == ']': + self.pos = i+1 + return w_list + # + while True: + w_item = self.decode_any(i) + i = self.pos + self.space.call_method(w_list, 'append', w_item) + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + i += 1 + if ch == ']': + self.pos = i + return w_list + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.pos) + + def decode_object(self, i): + start = i + w_dict = self.space.newdict() + # + i = self.skip_whitespace(i) + if self.ll_chars[i] == '}': + self.pos = i+1 + return w_dict + # + while True: + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any(i) + if self.last_type != TYPE_STRING: + self._raise("Key name must be string for object starting at char %d", start) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + if ch != ':': + self._raise("No ':' 
found at char %d", i) + i += 1 + i = self.skip_whitespace(i) + # + w_value = self.decode_any(i) + self.space.setitem(w_dict, w_name, w_value) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + i += 1 + if ch == '}': + self.pos = i + return w_dict + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.pos) + + + def decode_string(self, i): + start = i + bits = 0 + while True: + # this loop is a fast path for strings which do not contain escape + # characters + ch = self.ll_chars[i] + i += 1 + bits |= ord(ch) + if ch == '"': + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + else: + # ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + content_so_far = self.getslice(start, i-1) + self.pos = i-1 + return self.decode_string_escaped(start, content_so_far) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + + + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + else: + builder.append_multiple_char(ch, 1) # we should implement append_char + + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 + put = builder.append_multiple_char + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) + elif ch == 'u': + return self.decode_escape_sequence_unicode(i, builder) + else: + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i + + def decode_escape_sequence_unicode(self, i, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
+ start = i + i += 4 + hexdigits = self.getslice(start, i) + try: + val = int(hexdigits, 16) + if val & 0xfc00 == 0xd800: + # surrogate pair + val = self.decode_surrogate_pair(i, val) + i += 6 + except ValueError: + self._raise("Invalid \uXXXX escape (char %d)", i-1) + return # help the annotator to know that we'll never go beyond + # this point + # + uchr = unichr(val) + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + return i + + def decode_surrogate_pair(self, i, highsurr): + if self.ll_chars[i] != '\\' or self.ll_chars[i+1] != 'u': + self._raise("Unpaired high surrogate at char %d", i) + i += 2 + hexdigits = self.getslice(i, i+4) + lowsurr = int(hexdigits, 16) # the possible ValueError is caugth by the caller + return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) + +def loads(space, w_s): + if space.isinstance_w(w_s, space.w_unicode): + raise OperationError(space.w_TypeError, + space.wrap("Expected utf8-encoded str, got unicode")) + s = space.str_w(w_s) + decoder = JSONDecoder(space, s) + try: + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if i < len(s): + start = i + end = len(s) - 1 + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res + finally: + decoder.close() diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): + return W_Unicode(x) + + def wrapint(self, x): + return W_Int(x) + + def wrapfloat(self, x): + return W_Float(x) + + def wrap(self, x): + if isinstance(x, int): + return W_Int(x) + elif isinstance(x, float): + return W_Float(x) 
+ ## elif isinstance(x, str): + ## assert False + else: + return W_Unicode(unicode(x)) + wrap._annspecialcase_ = "specialize:argtype(1)" + + +fakespace = FakeSpace() + +def myloads(msg): + return loads(fakespace, W_String(msg)) + + +def bench(title, N, fn, arg): + a = time.clock() + for i in range(N): + res = fn(arg) + b = time.clock() + print title, (b-a) / N * 1000 + +def entry_point(argv): + if len(argv) != 3: + print 'Usage: %s FILE n' % argv[0] + return 1 + filename = argv[1] + N = int(argv[2]) + f = open_file_as_stream(filename) + msg = f.readall() + + try: + bench('loads ', N, myloads, msg) + except OperationError, e: + print 'Error', e._compute_value(fakespace) + + return 0 + +# _____ Define and setup target ___ + +def target(*args): + return entry_point, None + +if __name__ == '__main__': + entry_point(sys.argv) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -0,0 +1,188 @@ +# -*- encoding: utf-8 -*- +import py +from pypy.module._pypyjson.interp_decoder import JSONDecoder + +def test_skip_whitespace(): + s = ' hello ' + dec = JSONDecoder('fake space', s) + assert dec.pos == 0 + assert dec.skip_whitespace(0) == 3 + assert dec.skip_whitespace(3) == 3 + assert dec.skip_whitespace(8) == len(s) + dec.close() + + + +class AppTest(object): + spaceconfig = {"objspace.usemodules._pypyjson": True} + + def test_raise_on_unicode(self): + import _pypyjson + raises(TypeError, _pypyjson.loads, u"42") + + + def test_decode_constants(self): + import _pypyjson + assert _pypyjson.loads('null') is None + raises(ValueError, _pypyjson.loads, 'nul') + raises(ValueError, _pypyjson.loads, 'nu') + raises(ValueError, _pypyjson.loads, 'n') + raises(ValueError, _pypyjson.loads, 'nuXX') + # + assert _pypyjson.loads('true') is True + raises(ValueError, _pypyjson.loads, 'tru') + raises(ValueError, _pypyjson.loads, 'tr') + raises(ValueError, _pypyjson.loads, 't') + raises(ValueError, _pypyjson.loads, 'trXX') + # + assert _pypyjson.loads('false') is False + raises(ValueError, _pypyjson.loads, 'fals') + raises(ValueError, _pypyjson.loads, 'fal') + raises(ValueError, _pypyjson.loads, 'fa') + raises(ValueError, _pypyjson.loads, 'f') + raises(ValueError, _pypyjson.loads, 'falXX') + + + def test_decode_string(self): + import _pypyjson + res = _pypyjson.loads('"hello"') + assert res == u'hello' + assert type(res) is unicode + + def test_decode_string_utf8(self): + import _pypyjson + s = u'àèìòù' + res = _pypyjson.loads('"%s"' % s.encode('utf-8')) + assert res == s + + def test_skip_whitespace(self): + import _pypyjson + s = ' "hello" ' + assert _pypyjson.loads(s) == u'hello' + s = ' "hello" extra' + raises(ValueError, "_pypyjson.loads(s)") + + def test_unterminated_string(self): + import _pypyjson + s = '"hello' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence(self): + import _pypyjson + assert _pypyjson.loads(r'"\\"') == u'\\' + assert _pypyjson.loads(r'"\""') == u'"' + assert _pypyjson.loads(r'"\/"') == u'/' + assert _pypyjson.loads(r'"\b"') == u'\b' + assert _pypyjson.loads(r'"\f"') == u'\f' + assert _pypyjson.loads(r'"\n"') == u'\n' + assert _pypyjson.loads(r'"\r"') == u'\r' + assert _pypyjson.loads(r'"\t"') == u'\t' + + def test_escape_sequence_in_the_middle(self): + import _pypyjson + s = r'"hello\nworld"' + assert _pypyjson.loads(s) == "hello\nworld" + + def test_unterminated_string_after_escape_sequence(self): + import 
_pypyjson + s = r'"hello\nworld' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence_unicode(self): + import _pypyjson + s = r'"\u1234"' + assert _pypyjson.loads(s) == u'\u1234' + + def test_invalid_utf_8(self): + import _pypyjson + s = '"\xe0"' # this is an invalid UTF8 sequence inside a string + raises(UnicodeDecodeError, "_pypyjson.loads(s)") + + def test_decode_numeric(self): + import sys + import _pypyjson + def check(s, val): + res = _pypyjson.loads(s) + assert type(res) is type(val) + assert res == val + # + check('42', 42) + check('-42', -42) + check('42.123', 42.123) + check('42E0', 42.0) + check('42E3', 42000.0) + check('42E-1', 4.2) + check('42E+1', 420.0) + check('42.123E3', 42123.0) + check('0', 0) + check('-0', 0) + check('0.123', 0.123) + check('0E3', 0.0) + check('5E0001', 50.0) + check(str(1 << 32), 1 << 32) + check(str(1 << 64), 1 << 64) + # + x = str(sys.maxint+1) + '.123' + check(x, float(x)) + x = str(sys.maxint+1) + 'E1' + check(x, float(x)) + x = str(sys.maxint+1) + 'E-1' + check(x, float(x)) + # + check('1E400', float('inf')) + ## # these are non-standard but supported by CPython json + check('Infinity', float('inf')) + check('-Infinity', float('-inf')) + + def test_nan(self): + import math + import _pypyjson + res = _pypyjson.loads('NaN') + assert math.isnan(res) + + def test_decode_numeric_invalid(self): + import _pypyjson + def error(s): + raises(ValueError, _pypyjson.loads, s) + # + error(' 42 abc') + error('.123') + error('+123') + error('12.') + error('12.-3') + error('12E') + error('12E-') + error('0123') # numbers can't start with 0 + + def test_decode_object(self): + import _pypyjson + assert _pypyjson.loads('{}') == {} + assert _pypyjson.loads('{ }') == {} + # + s = '{"hello": "world", "aaa": "bbb"}' + assert _pypyjson.loads(s) == {'hello': 'world', + 'aaa': 'bbb'} + raises(ValueError, _pypyjson.loads, '{"key"') + raises(ValueError, _pypyjson.loads, '{"key": 42') + + def test_decode_object_nonstring_key(self): + import _pypyjson + raises(ValueError, "_pypyjson.loads('{42: 43}')") + + def test_decode_array(self): + import _pypyjson + assert _pypyjson.loads('[]') == [] + assert _pypyjson.loads('[ ]') == [] + assert _pypyjson.loads('[1]') == [1] + assert _pypyjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_pypyjson.loads('[1: 2]')") + raises(ValueError, "_pypyjson.loads('[1, 2')") + raises(ValueError, """_pypyjson.loads('["extra comma",]')""") + + def test_unicode_surrogate_pair(self): + import _pypyjson + expected = u'z\U0001d120x' + res = _pypyjson.loads('"z\\ud834\\udd20x"') + assert res == expected + + diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -76,18 +76,22 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing - ## from pypy.tool import gdb_pypy - ## reload(gdb_pypy) - ## gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache - ## self.__class__ = gdb_pypy.RPyType + from pypy.tool import gdb_pypy + reload(gdb_pypy) + gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache + self.__class__ = gdb_pypy.RPyType print self.do_invoke(arg, from_tty) def do_invoke(self, arg, from_tty): - obj = self.gdb.parse_and_eval(arg) - hdr = lookup(obj, '_gcheader') - tid = hdr['h_tid'] - offset = tid & 0xFFFFFFFF # 64bit only - offset = int(offset) # convert from gdb.Value to python int + try: + offset = int(arg) + except ValueError: + obj = 
self.gdb.parse_and_eval(arg) + hdr = lookup(obj, '_gcheader') + tid = hdr['h_tid'] + offset = tid & 0xFFFFFFFF # 64bit only + offset = int(offset) # convert from gdb.Value to python int + typeids = self.get_typeids() if offset in typeids: return typeids[offset] From noreply at buildbot.pypy.org Mon Jul 8 18:47:11 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Mon, 8 Jul 2013 18:47:11 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: wrap copy_string_to_raw inside a oopspec, because the JIT complains if it sees a cast_ptr_to_adr on a GC-managed object Message-ID: <20130708164711.0DEF71C0170@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65270:cd1d693151be Date: 2013-07-08 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/cd1d693151be/ Log: wrap copy_string_to_raw inside a oopspec, because the JIT complains if it sees a cast_ptr_to_adr on a GC-managed object diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -79,6 +79,9 @@ # OS_RAW_MALLOC_VARSIZE_CHAR = 110 OS_RAW_FREE = 111 + # + OS_STR_COPY_TO_RAW = 112 + OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1665,12 +1665,14 @@ dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, "stroruni.slice": EffectInfo.OS_STR_SLICE, "stroruni.equal": EffectInfo.OS_STR_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_STR_COPY_TO_RAW, } CHR = lltype.Char elif SoU.TO == rstr.UNICODE: dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, "stroruni.slice": EffectInfo.OS_UNI_SLICE, "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar else: diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -83,6 +83,7 @@ copy_string_contents = func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) + @jit.oopspec('stroruni.copy_string_to_raw(src, ptrdst, srcstart, length)') def copy_string_to_raw(src, ptrdst, srcstart, length): """ Copies 'length' characters from the 'src' string to the 'ptrdst' diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1138,10 +1138,6 @@ self.interpret(f, [array, 4]) assert list(array) == list('abc'*4) lltype.free(array, flavor='raw') - - - - class TestOOtype(BaseTestRstr, OORtypeMixin): pass From noreply at buildbot.pypy.org Tue Jul 9 03:00:35 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:35 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove all uses of OORtypeMixin and LLRtypeMixin Message-ID: <20130709010035.457321C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65271:c125ce6208a1 Date: 2013-07-07 19:11 +0200 http://bitbucket.org/pypy/pypy/changeset/c125ce6208a1/ Log: Remove all uses of OORtypeMixin and LLRtypeMixin diff too long, truncating to 2000 out of 2364 lines diff --git a/rpython/rlib/unicodedata/test/test_ucd.py b/rpython/rlib/unicodedata/test/test_ucd.py --- a/rpython/rlib/unicodedata/test/test_ucd.py +++ b/rpython/rlib/unicodedata/test/test_ucd.py @@ -1,9 +1,9 @@ from 
rpython.rlib.runicode import code_to_unichr from rpython.rlib.unicodedata import unicodedb_5_2_0 -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest -class TestTranslated(BaseRtypingTest, LLRtypeMixin): +class TestTranslated(BaseRtypingTest): def test_translated(self): def f(n): if n == 0: diff --git a/rpython/rtyper/lltypesystem/module/test/test_llinterp_math.py b/rpython/rtyper/lltypesystem/module/test/test_llinterp_math.py --- a/rpython/rtyper/lltypesystem/module/test/test_llinterp_math.py +++ b/rpython/rtyper/lltypesystem/module/test/test_llinterp_math.py @@ -1,13 +1,12 @@ """Just another bunch of tests for llmath, run on top of llinterp.""" -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem.module import ll_math import math from rpython.rlib import rfloat -# XXX no OORtypeMixin here -class TestMath(BaseRtypingTest, LLRtypeMixin): +class TestMath(BaseRtypingTest): def new_unary_test(name): try: fn = getattr(math, name) diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -6,11 +6,9 @@ from rpython.rlib.rposix import get_errno, set_errno from rpython.translator.c.test.test_genc import compile as compile_c from rpython.rtyper.lltypesystem.lltype import Signed, Ptr, Char, malloc -from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype from rpython.tool.udir import udir from rpython.rtyper.test.test_llinterp import interpret -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin from rpython.annotator.annrpython import RPythonAnnotator from rpython.rtyper.rtyper import RPythonTyper from rpython.translator.backendopt.all import backend_optimizations @@ -56,33 +54,33 @@ xf = self.compile(f, []) assert xf() == 8+3 - + def test_string(self): eci = ExternalCompilationInfo(includes=['string.h']) z = llexternal('strlen', [CCHARP], Signed, compilation_info=eci) - + def f(): s = str2charp("xxx") res = z(s) free_charp(s) return res - + xf = self.compile(f, [], backendopt=False) assert xf() == 3 - + def test_unicode(self): eci = ExternalCompilationInfo(includes=['string.h']) z = llexternal('wcslen', [CWCHARP], Signed, compilation_info=eci) - + def f(): s = unicode2wcharp(u"xxx\xe9") res = z(s) free_wcharp(s) return res - + xf = self.compile(f, [], backendopt=False) assert xf() == 4 - + def test_string_reverse(self): c_source = py.code.Source(""" #include @@ -103,7 +101,7 @@ eci = ExternalCompilationInfo(separate_module_sources=[c_source], post_include_bits=['char *f(char*);']) z = llexternal('f', [CCHARP], CCHARP, compilation_info=eci) - + def f(): s = str2charp("xxx") l_res = z(s) @@ -111,14 +109,14 @@ lltype.free(l_res, flavor='raw') free_charp(s) return len(res) - + xf = self.compile(f, [], backendopt=False) assert xf() == 3 - + def test_stringstar(self): c_source = """ #include - + int f(char *args[]) { char **p = args; int l = 0; @@ -131,17 +129,17 @@ """ eci = ExternalCompilationInfo(separate_module_sources=[c_source]) z = llexternal('f', [CCHARPP], Signed, compilation_info=eci) - + def f(): l = ["xxx", "x", "xxxx"] ss = liststr2charpp(l) result = z(ss) free_charpp(ss) return result - + xf = self.compile(f, [], backendopt=False) assert xf() == 8 - + def test_struct(self): h_source = """ #ifndef 
_MY_SOURCE_H @@ -155,24 +153,24 @@ """ h_file = udir.join("structxx.h") h_file.write(h_source) - + c_source = """ #include - + int f(struct xx* z) { return (z->one + z->three); } """ TP = CStructPtr('xx', ('one', INT), ('two', Char), ('three', INT)) - + eci = ExternalCompilationInfo( includes=['structxx.h'], include_dirs=[udir], separate_module_sources=[c_source] ) z = llexternal('f', [TP], INT, compilation_info=eci) - + def f(): struct = lltype.malloc(TP.TO, flavor='raw') struct.c_one = cast(INT, 3) @@ -181,10 +179,10 @@ result = z(struct) lltype.free(struct, flavor='raw') return cast(SIGNED, result) - + fn = self.compile(f, [], backendopt=False) assert fn() == 8 - + def test_externvar(self): import os if os.name == 'nt': @@ -192,25 +190,25 @@ bad_fd = 0 else: bad_fd = 12312312 - + def f(): set_errno(12) return get_errno() - + def g(): try: os.write(bad_fd, "xxx") except OSError: pass return get_errno() - + fn = self.compile(f, []) assert fn() == 12 gn = self.compile(g, []) import errno assert gn() == errno.EBADF - - + + def test_extra_include_dirs(self): udir.ensure("incl", dir=True) udir.join("incl", "incl.h").write("#define C 3") @@ -227,21 +225,21 @@ separate_module_sources=[c_source] ) z = llexternal('fun', [], Signed, compilation_info=eci) - + def f(): return z() - + res = self.compile(f, []) assert res() == 3 - + def test_compile_cast(self): def f(n): return cast(SIZE_T, n) - + f1 = self.compile(f, [int]) res = f1(-1) assert res == r_size_t(-1) - + def test_opaque_type(self): h_source = py.code.Source(""" #ifndef _OPAQUE_H @@ -251,22 +249,22 @@ }; #endif /* _OPAQUE_H */ """) - + c_source = py.code.Source(""" #include "opaque.h" - + char get(struct stuff* x) { x->data[13] = 'a'; return x->data[13]; } """) - + # if it doesn't segfault, than we probably malloced it :-) h_file = udir.join("opaque.h") h_file.write(h_source) - + from rpython.rtyper.tool import rffi_platform eci = ExternalCompilationInfo( includes=['opaque.h'], @@ -274,15 +272,15 @@ separate_module_sources=[c_source] ) STUFFP = COpaquePtr('struct stuff', compilation_info=eci) - + ll_get = llexternal('get', [STUFFP], CHAR, compilation_info=eci) - + def f(): ll_stuff = lltype.malloc(STUFFP.TO, flavor='raw') result = ll_get(ll_stuff) lltype.free(ll_stuff, flavor='raw') return result - + f1 = self.compile(f, []) assert f1() == 'a' @@ -319,25 +317,25 @@ """ % (ctype_pref, )) h_file = udir.join("opaque2%s.h" % (ctype_pref, )) h_file.write(h_source) - + from rpython.rtyper.tool import rffi_platform eci = ExternalCompilationInfo( includes=[h_file.basename], include_dirs=[str(udir)] ) ll_returnchar = llexternal('returnchar', [], rffi_type, compilation_info=eci) - + def f(): result = ll_returnchar() return result - + f1 = self.compile(f, []) assert f1() == chr(42) def test_generate_return_char_tests(self): yield self.return_char, False yield self.return_char, True - + def test_prebuilt_constant(self): py.test.skip("Think how to do it sane") h_source = py.code.Source(""" @@ -347,19 +345,19 @@ """) h_include = udir.join('constants.h') h_include.write(h_source) - + eci = ExternalCompilationInfo(includes=['stdio.h', str(h_include.basename)], include_dirs=[str(udir)]) - + get_x, set_x = CExternVariable(lltype.Signed, 'x', eci) get_z, set_z = CExternVariable(CCHARPP, 'z', eci) - + def f(): one = get_x() set_x(13) return one + get_x() - + def g(): l = liststr2charpp(["a", "b", "c"]) try: @@ -367,7 +365,7 @@ return charp2str(get_z()[2]) finally: free_charpp(l) - + fn = self.compile(f, []) assert fn() == 16 gn = self.compile(g, []) @@ 
-380,7 +378,7 @@ extern Signed eating_callback(Signed arg, Signed(*call)(Signed)); #endif /* _CALLBACK_H */ """) - + h_include = udir.join('callback.h') h_include.write(h_source) @@ -425,7 +423,7 @@ def two(i): return i + 2 - + def f(i): if i > 3: return eating_callback(i, one) @@ -471,7 +469,7 @@ TP = lltype.Ptr(lltype.GcStruct('X', ('x', lltype.Signed))) struct = lltype.malloc(TP.TO) # gc structure struct.x = 8 - + def g(i): return get_keepalive_object(i, TP).x @@ -495,7 +493,7 @@ assert f() == d[:-1] fn = self.compile(f, [], gcpolicy='ref') assert fn() == d[:-1] - + def test_nonmoving_unicode(self): d = u'non-moving data' def f(): @@ -558,7 +556,7 @@ return cast(SIGNED, res) assert f() == 3 assert interpret(f, []) == 3 - + def test_structcopy(self): X2 = lltype.Struct('X2', ('x', SIGNED)) X1 = lltype.Struct('X1', ('a', SIGNED), ('x2', X2), ('p', lltype.Ptr(X2))) @@ -582,7 +580,7 @@ assert f() == 123 res = interpret(f, []) assert res == 123 - + def test_make_annotation(self): X = CStruct('xx', ('one', INT)) def f(): @@ -594,14 +592,14 @@ lltype.free(p, flavor='raw') return 3 assert interpret(f, []) == 3 - + def test_implicit_cast(self): z = llexternal('z', [USHORT, ULONG, USHORT, DOUBLE], USHORT, sandboxsafe=True) # to allow the wrapper to be inlined - + def f(x, y, xx, yy): return z(x, y, xx, yy) - + a = RPythonAnnotator() r = a.build_types(f, [int, int, int, int]) rtyper = RPythonTyper(a) @@ -616,14 +614,14 @@ expected = {'force_cast': 3, 'cast_int_to_float': 1, 'direct_call': 1} for k, v in expected.items(): assert s[k] == v - + def test_stringpolicy1(self): eci = ExternalCompilationInfo(includes=['string.h']) strlen = llexternal('strlen', [CCHARP], SIZE_T, compilation_info=eci) def f(): return cast(SIGNED, strlen("Xxx")) assert interpret(f, [], backendopt=True) == 3 - + def test_stringpolicy3(self): eci = ExternalCompilationInfo(includes=['string.h']) strlen = llexternal('strlen', [CCHARP], INT, compilation_info=eci) @@ -632,9 +630,9 @@ res = strlen(ll_str) lltype.free(ll_str, flavor='raw') return res - + assert interpret(f, [], backendopt=True) == 3 - + def test_stringpolicy_mixed(self): eci = ExternalCompilationInfo(includes=['string.h']) strlen = llexternal('strlen', [CCHARP], SIZE_T, @@ -645,9 +643,9 @@ res2 = strlen(ll_str) lltype.free(ll_str, flavor='raw') return cast(SIGNED, res1*10 + res2) - - assert interpret(f, [], backendopt=True) == 43 - + + assert interpret(f, [], backendopt=True) == 43 + def test_around_extcall(self): if sys.platform == "win32": py.test.skip('No pipes on windows') @@ -666,7 +664,7 @@ os.write(write_fd, s) register_external(mywrite, [str], annmodel.s_None, 'll_mywrite', llfakeimpl=llimpl, sandboxsafe=True) - + def before(): mywrite("B") def after(): @@ -675,11 +673,11 @@ os.write(write_fd, "-") invoke_around_extcall(before, after) os.write(write_fd, "E") - + interpret(f, []) data = os.read(read_fd, 99) assert data == "-BEA" - + finally: os.close(write_fd) os.close(read_fd) @@ -688,16 +686,16 @@ """ Try to call some llexternal function with llinterp """ z = llexternal('z', [Signed], Signed, _callable=lambda x:x+1) - + def f(): return z(2) - + res = interpret(f, []) assert res == 3 def test_size_t_sign(self): assert r_size_t(-1) > 0 - + def test_cast(self): res = cast(SIZE_T, -1) assert type(res) is r_size_t @@ -705,7 +703,7 @@ # res = cast(lltype.Signed, 42.5) assert res == 42 - + res = cast(lltype.SingleFloat, 12.3) assert res == r_singlefloat(12.3) res = cast(lltype.SingleFloat, res) @@ -738,7 +736,7 @@ ULONGLONG: ctypes.c_ulonglong, SIZE_T: 
ctypes.c_size_t, } - + for ll, ctp in cache.items(): assert sizeof(ll) == ctypes.sizeof(ctp) assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll) @@ -747,7 +745,7 @@ assert size_and_sign(lltype.UniChar)[1] assert size_and_sign(UINT)[1] assert not size_and_sign(INT)[1] - + def test_rffi_offsetof(self): import struct from rpython.rtyper.tool import rffi_platform @@ -777,7 +775,7 @@ for i in xrange(len(data) - 2): assert a2[i] == a[i + 2] lltype.free(a, flavor='raw') - + def test_ptradd_interpret(): interpret(test_ptradd, []) diff --git a/rpython/rtyper/module/test/test_ll_strtod.py b/rpython/rtyper/module/test/test_ll_strtod.py --- a/rpython/rtyper/module/test/test_ll_strtod.py +++ b/rpython/rtyper/module/test/test_ll_strtod.py @@ -1,9 +1,9 @@ import py -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib import rfloat -class BaseTestStrtod(BaseRtypingTest): +class TestStrtod(BaseRtypingTest): def test_formatd(self): for flags in [0, rfloat.DTSF_ADD_DOT_0]: @@ -14,12 +14,12 @@ def test_parts_to_float(self): from rpython.rtyper.annlowlevel import hlstr - + def f(a, b, c, d): a,b,c,d = hlstr(a), hlstr(b), hlstr(c), hlstr(d) - + return rfloat.parts_to_float(a, b, c, d) - + data = [ (("","1","","") , 1.0), (("-","1","","") , -1.0), @@ -32,6 +32,3 @@ for parts, val in data: args = [self.string_to_ll(i) for i in parts] assert self.interpret(f, args) == val - -class TestLLStrtod(BaseTestStrtod, LLRtypeMixin): - pass diff --git a/rpython/rtyper/module/test/test_ll_time.py b/rpython/rtyper/module/test/test_ll_time.py --- a/rpython/rtyper/module/test/test_ll_time.py +++ b/rpython/rtyper/module/test/test_ll_time.py @@ -1,14 +1,14 @@ -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest #from rpython.translator.c.test.test_genc import compile import time, sys -class BaseTestTime(BaseRtypingTest): +class TestTime(BaseRtypingTest): def test_time_time(self): def fn(): return time.time() - + t0 = time.time() res0 = self.interpret(fn, []) t1 = time.time() @@ -54,9 +54,3 @@ t1 = time.time() assert t0 <= t1 assert t1 - t0 >= 0.15 - -class TestLLType(BaseTestTime, LLRtypeMixin): - pass - -class TestOOType(BaseTestTime, OORtypeMixin): - pass diff --git a/rpython/rtyper/module/test/test_posix.py b/rpython/rtyper/module/test/test_posix.py --- a/rpython/rtyper/module/test/test_posix.py +++ b/rpython/rtyper/module/test/test_posix.py @@ -1,5 +1,5 @@ import py -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib.rarithmetic import is_valid_int @@ -10,7 +10,7 @@ testf = udir.join('test.txt') module.path = testf.strpath -class BaseTestPosix(BaseRtypingTest): +class TestPosix(BaseRtypingTest): def setup_method(self, meth): # prepare/restore the file before each test @@ -70,7 +70,7 @@ def f(fi, pos): posix.lseek(fi, pos, 0) fi = os.open(path, os.O_RDONLY, 0777) - func = self.interpret(f, [fi, 5]) + func = self.interpret(f, [fi, 5]) res = os.read(fi, 2) assert res =='is' @@ -129,11 +129,11 @@ return 1 except OSError: return 2 - + assert self.interpret(f, []) == 1 os.unlink(path) assert self.interpret(f, []) == 2 - + def test_close(self): def f(fi): return posix.close(fi) @@ -148,7 +148,7 @@ def f(fi,len): os.ftruncate(fi,len) fi = os.open(path,os.O_RDWR,0777) - func = self.interpret(f,[fi,6]) + func = 
self.interpret(f,[fi,6]) assert os.fstat(fi).st_size == 6 if hasattr(os, 'getuid'): @@ -197,9 +197,7 @@ for value in [0, 1, 127, 128, 255]: res = self.interpret(fun, [value]) - assert res == fun(value) - -class TestLLtype(BaseTestPosix, LLRtypeMixin): + assert res == fun(value) if hasattr(os, 'getgroups'): def test_getgroups(self): @@ -207,19 +205,3 @@ return os.getgroups() ll_a = self.interpret(f, []) assert self.ll_to_list(ll_a) == f() - -class TestOOtype(BaseTestPosix, OORtypeMixin): - def test_fstat(self): - py.test.skip("ootypesystem does not support os.fstat") - - def test_os_chroot(self): - py.test.skip("ootypesystem does not support os.chroot") - - def test_stat(self): - py.test.skip("ootypesystem does not support os.stat") - - def test_stat_exception(self): - py.test.skip("ootypesystem does not support os.stat") - - def test_chown(self): - py.test.skip("ootypesystem does not support os.chown") diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -2,16 +2,15 @@ """ Few tests for annlowlevel helpers """ -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem.rstr import mallocstr, mallocunicode from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.annlowlevel import hlstr, llstr, oostr +from rpython.rtyper.annlowlevel import hlstr, llstr from rpython.rtyper.annlowlevel import hlunicode, llunicode from rpython.rtyper import annlowlevel -class TestLLType(BaseRtypingTest, LLRtypeMixin): +class TestLLType(BaseRtypingTest): def test_hlstr(self): s = mallocstr(3) s.chars[0] = "a" @@ -33,7 +32,7 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 - + def test_hlunicode(self): s = mallocunicode(3) s.chars[0] = u"a" @@ -64,31 +63,3 @@ assert lltype.typeOf(ptr) == annlowlevel.base_ptr_lltype() y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x - - -class TestOOType(BaseRtypingTest, OORtypeMixin): - def test_hlstr(self): - s = ootype.make_string("abc") - assert hlstr(s) == "abc" - - def test_oostr(self): - s = oostr("abc") - assert ootype.typeOf(s) == ootype.String - assert s._str == "abc" - - def test_oostr_compile(self): - def f(arg): - s = oostr(hlstr(arg)) - return s.ll_strlen() - - res = self.interpret(f, [self.string_to_ll("abc")]) - assert res == 3 - - def test_cast_instance_to_base_obj(self): - class X(object): - pass - x = X() - obj = annlowlevel.cast_instance_to_base_obj(x) - assert lltype.typeOf(obj) == annlowlevel.base_obj_ootype() - y = annlowlevel.cast_base_ptr_to_instance(X, obj) - assert y is x diff --git a/rpython/rtyper/test/test_exception.py b/rpython/rtyper/test/test_exception.py --- a/rpython/rtyper/test/test_exception.py +++ b/rpython/rtyper/test/test_exception.py @@ -1,7 +1,7 @@ import py from rpython.translator.translator import TranslationContext -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.llinterp import LLException from rpython.rtyper.error import MissingRTypeOperation @@ -33,7 +33,7 @@ rtype(dummyfn) -class BaseTestException(BaseRtypingTest): +class TestException(BaseRtypingTest): def test_exception_with_arg(self): def g(n): raise OSError(n, "?") @@ -113,8 +113,6 @@ res = self.interpret(f, [42]) assert res == 42 - 
-class TestLLtype(BaseTestException, LLRtypeMixin): def test_cannot_raise_ll_exception(self): from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr def g(): @@ -127,6 +125,3 @@ except OverflowError: return 42 py.test.raises(MissingRTypeOperation, self.interpret, f, []) - -class TestOOtype(BaseTestException, OORtypeMixin): - pass diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -1,9 +1,9 @@ import py -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest -class BaseTestGenerator(BaseRtypingTest): +class TestGenerator(BaseRtypingTest): def test_simple_explicit(self): def g(a, b, c): @@ -88,12 +88,3 @@ return s res = self.interpret(g, []) assert res == 6 - - -class TestLLtype(BaseTestGenerator, LLRtypeMixin): - pass - - -class TestOOtype(BaseTestGenerator, OORtypeMixin): - def test_iterating_generator(self): - py.test.skip("Iterators aren't supported on OOtype yet") diff --git a/rpython/rtyper/test/test_rbool.py b/rpython/rtyper/test/test_rbool.py --- a/rpython/rtyper/test/test_rbool.py +++ b/rpython/rtyper/test/test_rbool.py @@ -1,7 +1,7 @@ from rpython.translator.translator import TranslationContext from rpython.annotator import unaryop, binaryop from rpython.rtyper.test import snippet -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest class TestSnippet(object): @@ -33,7 +33,7 @@ for opname in binaryop.BINARY_OPERATIONS: print 'BINARY_OPERATIONS:', opname -class BaseTestRbool(BaseRtypingTest): +class TestRbool(BaseRtypingTest): def test_bool2int(self): def f(n): @@ -82,9 +82,3 @@ assert res == 1 res = self.interpret(f, [1, True]) assert res == 1 - -class TestLLtype(BaseTestRbool, LLRtypeMixin): - pass - -class TestOOtype(BaseTestRbool, OORtypeMixin): - pass diff --git a/rpython/rtyper/test/test_rbuilder.py b/rpython/rtyper/test/test_rbuilder.py --- a/rpython/rtyper/test/test_rbuilder.py +++ b/rpython/rtyper/test/test_rbuilder.py @@ -6,7 +6,7 @@ from rpython.rtyper.annlowlevel import llstr, hlstr from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem.rbuilder import StringBuilderRepr -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest class TestStringBuilderDirect(object): @@ -25,7 +25,7 @@ assert StringBuilderRepr.ll_build(sb) == sb.buf -class BaseTestStringBuilder(BaseRtypingTest): +class TestStringBuilder(BaseRtypingTest): def test_simple(self): def func(): s = StringBuilder() @@ -92,7 +92,7 @@ if s: s.append("3") return bool(s) - + def func(i): if i: s = StringBuilder() @@ -109,7 +109,7 @@ if s: s.append(u"3") return bool(s) - + def func(i): if i: s = UnicodeBuilder() @@ -120,11 +120,3 @@ assert not res res = self.interpret(func, [1]) assert res - - -class TestLLtype(BaseTestStringBuilder, LLRtypeMixin): - pass - -class TestOOtype(BaseTestStringBuilder, OORtypeMixin): - def test_append_charpsize(self): - py.test.skip("append_charpsize(): not implemented on ootype") diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -11,7 +11,7 @@ from rpython.rtyper.annlowlevel import hlstr, LowLevelAnnotatorPolicy from rpython.rtyper.lltypesystem import lltype, rffi 
from rpython.rtyper.test import test_llinterp -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool import udir from rpython.translator.translator import graphof @@ -24,7 +24,7 @@ yield op -class BaseTestRbuiltin(BaseRtypingTest): +class TestRbuiltin(BaseRtypingTest): def test_method_join(self): # this is tuned to catch a specific bug: @@ -453,9 +453,8 @@ assert x0 != x2 # the following checks are probably too precise, but work at # least on top of llinterp - if type(self) is TestLLtype: - assert x1 == intmask(x0) - assert x3 == intmask(x2) + assert x1 == intmask(x0) + assert x3 == intmask(x2) def test_id_on_builtins(self): def fn(): @@ -537,8 +536,6 @@ res = self.interpret(f, [12]) assert res == 512 - -class TestLLtype(BaseTestRbuiltin, LLRtypeMixin): def test_cast(self): def llfn(v): return rffi.cast(rffi.VOIDP, v) @@ -559,23 +556,3 @@ res = self.interpret(llfn, [lltype.nullptr(rffi.VOIDP.TO)]) assert res == 0 assert isinstance(res, r_ulonglong) - - -class TestOOtype(BaseTestRbuiltin, OORtypeMixin): - - def test_instantiate_multiple_meta(self): - class A: - x = 2 - class B(A): - x = 3 - def do_stuff(cls): - return cls.x - def f(i): - if i == 1: - cls = A - else: - cls = B - do_stuff(cls) - return instantiate(cls) - res = self.interpret(f, [1]) - assert res.getmeta() # check that it exists diff --git a/rpython/rtyper/test/test_rbytearray.py b/rpython/rtyper/test/test_rbytearray.py --- a/rpython/rtyper/test/test_rbytearray.py +++ b/rpython/rtyper/test/test_rbytearray.py @@ -1,9 +1,9 @@ -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem.rbytearray import hlbytearray from rpython.rtyper.annlowlevel import llstr, hlstr -class TestByteArray(BaseRtypingTest, LLRtypeMixin): +class TestByteArray(BaseRtypingTest): def test_bytearray_creation(self): def f(x): if x: diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -10,7 +10,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.rclass import (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY, IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY) -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.translator.translator import TranslationContext, graphof @@ -36,7 +36,7 @@ class C(B): pass -class BaseTestRclass(BaseRtypingTest): +class TestRclass(BaseRtypingTest): def test_instanceattr(self): def dummyfn(): @@ -71,7 +71,7 @@ return A else: return B - + def dummyfn(i): C = pick(i) i = C() @@ -93,7 +93,7 @@ return A else: return B - + def dummyfn(i): C = pick(i) i = C() @@ -281,14 +281,14 @@ class B(A): pass - + def f(): a = A() b = B() a.meth(1) # the self of this variant is annotated with A b.meth(2) # the self of this variant is annotated with B return 42 - + res = self.interpret(f, []) assert res == 42 @@ -300,18 +300,18 @@ class B(A): pass def f(i): - if i == 0: + if i == 0: c1 = A() - else: + else: c1 = B() return issubclass(type(c1), B) - assert self.interpret(f, [0]) == False + assert self.interpret(f, [0]) == False assert self.interpret(f, [1]) == True def g(i): - if i == 0: + if i == 0: c1 = A() - else: + else: c1 = B() return issubclass(type(c1), A) assert self.interpret(g, [0]) == True @@ -468,11 +468,9 @@ # a value that is (or was) the 
current_object_addr_as_int(). # --- disabled: assert res.item0 == res.item1 # the following property is essential on top of the lltypesystem - # otherwise prebuilt dictionaries are broken. It's wrong on - # top of the ootypesystem though. - if isinstance(self, LLRtypeMixin): - assert res.item2 == h_c - assert res.item3 == h_d + # otherwise prebuilt dictionaries are broken. + assert res.item2 == h_c + assert res.item3 == h_d def test_circular_hash_initialization(self): class B: @@ -517,10 +515,10 @@ def f(): return type(a) is A - + res = self.interpret(f, []) - - + + def test_void_fnptr(self): def g(): return 42 @@ -732,7 +730,7 @@ def test_immutable(self): class I(object): _immutable_ = True - + def __init__(self, v): self.v = v @@ -973,8 +971,6 @@ self.gengraph(f, []) -class TestLLtype(BaseTestRclass, LLRtypeMixin): - def test__del__(self): class A(object): def __init__(self): @@ -1161,7 +1157,7 @@ class Iterable(object): def __init__(self): self.counter = 0 - + def __iter__(self): return self @@ -1184,7 +1180,7 @@ class BaseIterable(object): def __init__(self): self.counter = 0 - + def __iter__(self): return self @@ -1193,7 +1189,7 @@ raise StopIteration self.counter += self.step return self.counter - 1 - + class Iterable(BaseIterable): step = 1 @@ -1212,125 +1208,3 @@ assert self.interpret(f, [True]) == f(True) assert self.interpret(f, [False]) == f(False) - - -class TestOOtype(BaseTestRclass, OORtypeMixin): - - def test__del__(self): - class A(object): - def __init__(self): - self.a = 2 - def __del__(self): - self.a = 3 - def f(): - a = A() - return a.a - t = TranslationContext() - t.buildannotator().build_types(f, []) - t.buildrtyper(type_system=self.type_system).specialize() - graph = graphof(t, f) - TYPE = graph.startblock.operations[0].args[0].value - _, meth = TYPE._lookup("o__del__") - assert meth.finalizer - - def test_del_inheritance(self): - from rpython.rlib import rgc - class State: - pass - s = State() - s.a_dels = 0 - s.b_dels = 0 - class A(object): - def __del__(self): - s.a_dels += 1 - class B(A): - def __del__(self): - s.b_dels += 1 - class C(A): - pass - def f(): - A() - B() - C() - A() - B() - C() - rgc.collect() - return s.a_dels * 10 + s.b_dels - res = f() - assert res == 42 - t = TranslationContext() - t.buildannotator().build_types(f, []) - t.buildrtyper(type_system=self.type_system).specialize() - graph = graphof(t, f) - TYPEA = graph.startblock.operations[0].args[0].value - TYPEB = graph.startblock.operations[1].args[0].value - TYPEC = graph.startblock.operations[2].args[0].value - _, destra = TYPEA._lookup("o__del__") - _, destrb = TYPEB._lookup("o__del__") - _, destrc = TYPEC._lookup("o__del__") - assert destra == destrc - assert destrb is not None - assert destra is not None - - def test_cast_object_instance(self): - A = ootype.Instance("Foo", ootype.ROOT) - - def fn_instance(): - a = ootype.new(A) - obj = ootype.cast_to_object(a) - a2 = ootype.cast_from_object(A, obj) - a3 = ootype.cast_from_object(ootype.ROOT, obj) - assert a is a2 - assert a is a3 - self.interpret(fn_instance, []) - - def test_cast_object_record(self): - B = ootype.Record({'x': ootype.Signed}) - - def fn_record(): - b = ootype.new(B) - b.x = 42 - obj = ootype.cast_to_object(b) - b2 = ootype.cast_from_object(B, obj) - assert b2.x == 42 - assert b is b2 - self.interpret(fn_record, []) - - def test_cast_object_null(self): - A = ootype.Instance("Foo", ootype.ROOT) - B = ootype.Record({'x': ootype.Signed}) - - def fn_null(): - a = ootype.null(A) - b = ootype.null(B) - obj1 = 
ootype.cast_to_object(a) - obj2 = ootype.cast_to_object(b) - assert obj1 == obj2 - assert ootype.cast_from_object(A, obj1) == a - assert ootype.cast_from_object(B, obj2) == b - self.interpret(fn_null, []) - - def test_cast_object_is_true(self): - A = ootype.Instance("Foo", ootype.ROOT) - def fn_is_true(flag): - if flag: - a = ootype.new(A) - else: - a = ootype.null(A) - obj = ootype.cast_to_object(a) - return bool(obj) - assert self.interpret(fn_is_true, [True]) is True - assert self.interpret(fn_is_true, [False]) is False - - def test_cast_object_mix_null(self): - A = ootype.Instance("Foo", ootype.ROOT) - def fn_mix_null(flag): - a = ootype.new(A) - obj = ootype.cast_to_object(a) - if flag: - return obj - else: - return ootype.NULL - res = self.interpret(fn_mix_null, [False]) - assert res is ootype.NULL diff --git a/rpython/rtyper/test/test_rconstantdict.py b/rpython/rtyper/test/test_rconstantdict.py --- a/rpython/rtyper/test/test_rconstantdict.py +++ b/rpython/rtyper/test/test_rconstantdict.py @@ -1,12 +1,12 @@ import py from rpython.rlib.objectmodel import r_dict -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest -class BaseTestRconstantdict(BaseRtypingTest): +class TestRconstantdict(BaseRtypingTest): - def test_constant_int_dict(self): - d = {1: 2, 2: 3, 3: 4} - def func(i): + def test_constant_int_dict(self): + d = {1: 2, 2: 3, 3: 4} + def func(i): return d[i] res = self.interpret(func, [3]) assert res == 4 @@ -60,10 +60,3 @@ for x in range(65, 91): res = self.interpret(func, [x]) assert res == x*x - - -class TestLLtype(BaseTestRconstantdict, LLRtypeMixin): - pass - -class TestOOtype(BaseTestRconstantdict, OORtypeMixin): - pass diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -2,7 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper import rint from rpython.rtyper.lltypesystem import rdict, rstr -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.objectmodel import r_dict from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong @@ -22,7 +22,7 @@ yield x -class BaseTestRdict(BaseRtypingTest): +class TestRdict(BaseRtypingTest): def test_dict_creation(self): def createdict(i): @@ -683,7 +683,6 @@ res = self.interpret(f, [700]) assert res == 12 -class TestLLtype(BaseTestRdict, LLRtypeMixin): def test_dict_but_not_with_char_keys(self): def func(i): d = {'h': i} @@ -1003,21 +1002,6 @@ finally: lltype._array._check_range = original_check_range - # ____________________________________________________________ - - - -class TestOOtype(BaseTestRdict, OORtypeMixin): - - def test_recursive(self): - def func(i): - dic = {i: {}} - dic[i] = dic - return dic[i] - res = self.interpret(func, [5]) - assert res.ll_get(5) is res - - # ____________________________________________________________ class TestStress: diff --git a/rpython/rtyper/test/test_remptydict.py b/rpython/rtyper/test/test_remptydict.py --- a/rpython/rtyper/test/test_remptydict.py +++ b/rpython/rtyper/test/test_remptydict.py @@ -1,5 +1,5 @@ import py -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest class BaseTestRemptydict(BaseRtypingTest): def test_empty_dict(self): @@ -26,16 +26,3 @@ return n res = 
self.interpret(f, []) assert res == 0 - -class TestLLtype(BaseTestRemptydict, LLRtypeMixin): - pass - -class TestOOtype(BaseTestRemptydict, OORtypeMixin): - def test_almost_empty_dict(self): - def f(flag): - d = {} - if flag: - d[None] = None - return None in d - assert self.interpret(f, [True]) is True - assert self.interpret(f, [False]) is False diff --git a/rpython/rtyper/test/test_rfloat.py b/rpython/rtyper/test/test_rfloat.py --- a/rpython/rtyper/test/test_rfloat.py +++ b/rpython/rtyper/test/test_rfloat.py @@ -2,7 +2,7 @@ from rpython.translator.translator import TranslationContext from rpython.annotator import unaryop, binaryop from rpython.rtyper.test import snippet -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.rarithmetic import ( r_int, r_uint, r_longlong, r_ulonglong, r_singlefloat) from rpython.rlib.objectmodel import compute_hash @@ -13,8 +13,8 @@ t = TranslationContext() t.buildannotator().build_types(func, types) t.buildrtyper().specialize() - t.checkgraphs() - + t.checkgraphs() + def test_not1(self): self._test(snippet.not1, [float]) @@ -37,7 +37,7 @@ for opname in binaryop.BINARY_OPERATIONS: print 'BINARY_OPERATIONS:', opname -class BaseTestRfloat(BaseRtypingTest): +class TestRfloat(BaseRtypingTest): inf = 'inf' minus_inf = '-inf' @@ -73,9 +73,9 @@ res = self.interpret(fn, [1.0]) assert res == 1 - assert type(res) is int + assert type(res) is int res = self.interpret(fn, [2.34]) - assert res == fn(2.34) + assert res == fn(2.34) def test_longlong_conversion(self): def fn(f): @@ -89,7 +89,7 @@ else: assert self.is_of_type(res, r_longlong) res = self.interpret(fn, [2.34]) - assert res == fn(2.34) + assert res == fn(2.34) big = float(0x7fffffffffffffff) x = big - 1.e10 assert x != big @@ -276,27 +276,8 @@ assert self.interpret(func, [0]) == 1e23 assert self.interpret(func, [1]) == -1e23 - - -class TestLLtype(BaseTestRfloat, LLRtypeMixin): - def test_hash(self): def fn(f): return compute_hash(f) res = self.interpret(fn, [1.5]) assert res == compute_hash(1.5) - - -class TestOOtype(BaseTestRfloat, OORtypeMixin): - - def test_formatd(self): - py.test.skip('formatd is broken on ootype') - - def test_formatd_repr(self): - py.test.skip('formatd is broken on ootype') - - def test_formatd_huge(self): - py.test.skip('formatd is broken on ootype') - - def test_parts_to_float(self): - py.test.skip('parts_to_float is broken on ootype') diff --git a/rpython/rtyper/test/test_rint.py b/rpython/rtyper/test/test_rint.py --- a/rpython/rtyper/test/test_rint.py +++ b/rpython/rtyper/test/test_rint.py @@ -6,11 +6,8 @@ from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import ovfcheck, r_int64, intmask, int_between from rpython.rlib import objectmodel -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.lltypesystem.lloperation import llop class TestSnippet(object): @@ -43,7 +40,7 @@ print 'BINARY_OPERATIONS:', opname -class BaseTestRint(BaseRtypingTest): +class TestRint(BaseRtypingTest): def test_char_constant(self): def dummyfn(i): @@ -424,15 +421,3 @@ assert not self.interpret(fn, [1, 5, 2]) assert not self.interpret(fn, [1, 2, 2]) assert not self.interpret(fn, [1, 1, 1]) - - - -class TestLLtype(BaseTestRint, LLRtypeMixin): - pass - -class 
TestOOtype(BaseTestRint, OORtypeMixin): - def test_oobox_int(self): - def f(): - x = llop.oobox_int(ootype.Object, 42) - return llop.oounbox_int(lltype.Signed, x) - assert self.interpret(f, []) == 42 diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -8,10 +8,9 @@ from rpython.rtyper.llinterp import LLException from rpython.rtyper.lltypesystem import rlist as ll_rlist from rpython.rtyper.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist -from rpython.rtyper.ootypesystem import rlist as oo_rlist from rpython.rtyper.rint import signed_repr from rpython.rtyper.rlist import * -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest from rpython.translator.translator import TranslationContext @@ -188,6 +187,8 @@ class BaseTestRlist(BaseRtypingTest): + type_system = 'lltype' + rlist = ll_rlist def test_simple(self): def dummyfn(): @@ -1139,13 +1140,7 @@ res = self.interpret(f, [0]) assert res == 1 - if self.type_system == 'lltype': - # on lltype we always get an AssertionError - py.test.raises(AssertionError, self.interpret, f, [1]) - else: - # on ootype we happen to get through the ll_asserts and to - # hit the IndexError from ootype.py - self.interpret_raises(IndexError, f, [1]) + py.test.raises(AssertionError, self.interpret, f, [1]) def f(x): l = [1] @@ -1190,13 +1185,7 @@ res = self.interpret(f, [0]) assert res == 1 - if self.type_system == 'lltype': - # on lltype we always get an AssertionError - py.test.raises(AssertionError, self.interpret, f, [1]) - else: - # on ootype we happen to get through the ll_asserts and to - # hit the IndexError from ootype.py - self.interpret_raises(IndexError, f, [1]) + py.test.raises(AssertionError, self.interpret, f, [1]) def f(x): l = [1] @@ -1233,13 +1222,7 @@ res = self.interpret(f, [0]) assert res == 1 - if self.type_system == 'lltype': - # on lltype we always get an AssertionError - py.test.raises(AssertionError, self.interpret, f, [1]) - else: - # on ootype we happen to get through the ll_asserts and to - # hit the IndexError from ootype.py - self.interpret_raises(IndexError, f, [1]) + py.test.raises(AssertionError, self.interpret, f, [1]) def test_charlist_extension_1(self): def f(n): @@ -1435,10 +1418,6 @@ res = self.interpret(f, [0]) assert self.ll_to_string(res) == 'abc' -class TestLLtype(BaseTestRlist, LLRtypeMixin): - type_system = 'lltype' - rlist = ll_rlist - def test_memoryerror(self): def fn(i): lst = [0] * i @@ -1610,11 +1589,3 @@ assert res == sum(map(ord, 'abcdef')) finally: rlist.ll_getitem_foldable_nonneg = prev - - -class TestOOtype(BaseTestRlist, OORtypeMixin): - rlist = oo_rlist - type_system = 'ootype' - - def test_reversed(self): - py.test.skip("unsupported") diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -2,7 +2,7 @@ from rpython.annotator import policy, specialize from rpython.rtyper.lltypesystem.lltype import typeOf -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest class MyBase: @@ -40,7 +40,7 @@ return self.x + y -class BaseTestRPBC(BaseRtypingTest): +class TestRPBC(BaseRtypingTest): def test_easy_call(self): def f(x): @@ -182,7 +182,7 @@ class DeviceTask(Task): def fn(self, a): return self.waitTask(a)+3 - 
+ def f(a, b): if b: inst = HandlerTask() @@ -190,7 +190,7 @@ inst = DeviceTask() return inst.runTask(a) - + assert self.interpret(f, [42, True]) == 45 assert self.interpret(f, [42, False]) == 46 @@ -323,9 +323,9 @@ fr = fr2 return getorbuild(fr) - res = self.interpret(f1, [0]) + res = self.interpret(f1, [0]) assert res == 7 - res = self.interpret(f1, [1]) + res = self.interpret(f1, [1]) assert res == 3 def test_call_memoized_function_with_bools(self): @@ -357,25 +357,25 @@ def test_call_memoized_cache(self): - # this test checks that we add a separate field - # per specialization and also it uses a subclass of + # this test checks that we add a separate field + # per specialization and also it uses a subclass of # the standard rpython.rlib.cache.Cache from rpython.rlib.cache import Cache fr1 = Freezing() fr2 = Freezing() - class Cache1(Cache): - def _build(self, key): - "NOT_RPYTHON" + class Cache1(Cache): + def _build(self, key): + "NOT_RPYTHON" if key is fr1: - return fr2 + return fr2 else: - return fr1 + return fr1 - class Cache2(Cache): - def _build(self, key): - "NOT_RPYTHON" + class Cache2(Cache): + def _build(self, key): + "NOT_RPYTHON" a = 1 if key is fr1: result = eval("a+2") @@ -394,9 +394,9 @@ newfr = cache1.getorbuild(fr) return cache2.getorbuild(newfr) - res = self.interpret(f1, [0]) + res = self.interpret(f1, [0]) assert res == 3 - res = self.interpret(f1, [1]) + res = self.interpret(f1, [1]) assert res == 7 def test_call_memo_with_single_value(self): @@ -525,7 +525,7 @@ res = self.interpret(f, [7]) assert res == 42 - def test_simple_function_pointer(self): + def test_simple_function_pointer(self): def f1(x): return x + 1 def f2(x): @@ -533,7 +533,7 @@ l = [f1, f2] - def pointersimple(i): + def pointersimple(i): return l[i](i) res = self.interpret(pointersimple, [1]) @@ -621,7 +621,7 @@ class A(object): pass def none(): return None - + def f(i): if i == 1: return none() @@ -631,7 +631,7 @@ assert not res res = self.interpret(f, [0]) assert self.ll_to_string(res) == "ab" - + def g(i): if i == 1: return none() @@ -642,7 +642,7 @@ res = self.interpret(g, [0]) assert self.class_name(res) == 'A' - + def test_conv_from_classpbcset_to_larger(self): class A(object): pass class B(A): pass @@ -652,7 +652,7 @@ return A def b(): return B - + def g(i): if i == 1: @@ -682,10 +682,10 @@ res = self.interpret(g, [0, 0]) assert self.class_name(res) == 'C' res = self.interpret(g, [0, 1]) - assert self.class_name(res) == 'B' + assert self.class_name(res) == 'B' res = self.interpret(g, [1, 0]) - assert self.class_name(res) == 'A' - + assert self.class_name(res) == 'A' + def test_call_starargs(self): def g(x=-100, *arg): return x + len(arg) @@ -849,7 +849,7 @@ return f1 def b(): return f2 - + def g(i): if i == 1: @@ -932,7 +932,7 @@ class B(A): def meth(self, a, b=0): return a+b - + class C(A): def meth(self, a, b=0): return a*b @@ -949,7 +949,7 @@ assert res == 1+3+2+7+11 res = self.interpret(f, [1]) assert res == 3*2+11*7 - + def test_multiple_ll_one_hl_op(self): class E(Exception): @@ -1609,7 +1609,7 @@ if a.func: return a.func(n) return -1 - + res = self.interpret(fn, [0]) assert res == -1 @@ -1640,20 +1640,14 @@ res = self.interpret(g, []) assert res == False -class TestLLtype(BaseTestRPBC, LLRtypeMixin): - pass - -class TestOOtype(BaseTestRPBC, OORtypeMixin): - pass - # ____________________________________________________________ -class BaseTestRPBCExtra(BaseRtypingTest): - +class TestRPBCExtra(BaseRtypingTest): + def test_folding_specialize_support(self): class S(object): - + def w(s, x): 
if isinstance(x, int): return x @@ -1690,7 +1684,7 @@ return funcdesc.cachedgraph(typ) p = P() - + res = self.interpret(f, [0, 66], policy=p) assert res == 0 res = self.interpret(f, [1, 66], policy=p) @@ -1699,18 +1693,7 @@ assert res == 12 res = self.interpret(f, [3, 5555], policy=p) assert res == 4 - -class TestExtraLLtype(BaseTestRPBCExtra, LLRtypeMixin): - pass -class TestExtraOOtype(BaseTestRPBCExtra, OORtypeMixin): - pass - -# ____________________________________________________________ -# We don't care about the following test_hlinvoke tests working on -# ootype. Maybe later. This kind of thing is only used in rdict -# anyway, that will probably have a different kind of implementation -# in ootype. def test_hlinvoke_simple(): def f(a,b): @@ -1719,8 +1702,8 @@ from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() from rpython.annotator import model as annmodel - - s_f = a.bookkeeper.immutablevalue(f) + + s_f = a.bookkeeper.immutablevalue(f) a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()]) a.complete() @@ -1757,7 +1740,7 @@ from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() from rpython.annotator import model as annmodel - + def g(i): if i: f = f1 @@ -1765,7 +1748,7 @@ f = f2 f(5,4) f(3,2) - + a.build_types(g, [int]) from rpython.rtyper import rtyper @@ -1838,10 +1821,10 @@ s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() - + from rpython.rtyper.llinterp import LLInterpreter interp = LLInterpreter(rt) - + #a.translator.view() c_a = A_repr.convert_const(A(None)) res = interp.eval_graph(ll_h_graph, [None, None, c_a]) @@ -1889,15 +1872,15 @@ s_R = a.bookkeeper.immutablevalue(r_f) s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). 
- getuniqueclassdef()) + getuniqueclassdef()) ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() - from rpython.rtyper.llinterp import LLInterpreter + from rpython.rtyper.llinterp import LLInterpreter interp = LLInterpreter(rt) - + # low-level value is just the instance c_f = rclass.getinstancerepr(rt, Impl_def).convert_const(Impl()) c_a = A_repr.convert_const(A(None)) @@ -1951,7 +1934,7 @@ assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() - from rpython.rtyper.llinterp import LLInterpreter + from rpython.rtyper.llinterp import LLInterpreter interp = LLInterpreter(rt) c_f = r_f.convert_const(i.f) @@ -1961,7 +1944,7 @@ # ____________________________________________________________ -class TestLLtypeSmallFuncSets(TestLLtype): +class TestSmallFuncSets(TestRPBC): def setup_class(cls): from rpython.config.translationoption import get_combined_translation_config cls.config = get_combined_translation_config(translating=True) @@ -1969,7 +1952,7 @@ def interpret(self, fn, args, **kwds): kwds['config'] = self.config - return TestLLtype.interpret(self, fn, args, **kwds) + return TestRPBC.interpret(self, fn, args, **kwds) def test_smallfuncsets_basic(): from rpython.translator.translator import TranslationContext, graphof diff --git a/rpython/rtyper/test/test_rrange.py b/rpython/rtyper/test/test_rrange.py --- a/rpython/rtyper/test/test_rrange.py +++ b/rpython/rtyper/test/test_rrange.py @@ -1,20 +1,21 @@ from rpython.rlib.rarithmetic import intmask from rpython.rtyper.rrange import ll_rangelen, ll_rangeitem, ll_rangeitem_nonneg, dum_nocheck -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.lltypesystem import rrange +from rpython.rtyper.test.tool import BaseRtypingTest -class BaseTestRrange(BaseRtypingTest): +class TestRrange(BaseRtypingTest): def test_rlist_range(self): def test1(start, stop, step, varstep): expected = range(start, stop, step) length = len(expected) if varstep: - l = self.rrange.ll_newrangest(start, stop, step) + l = rrange.ll_newrangest(start, stop, step) step = l.step else: - RANGE = self.rrange.RangeRepr(step).RANGE - l = self.rrange.ll_newrange(RANGE, start, stop) + RANGE = rrange.RangeRepr(step).RANGE + l = rrange.ll_newrange(RANGE, start, stop) assert ll_rangelen(l, step) == length lst = [ll_rangeitem(dum_nocheck, l, i, step) for i in range(length)] assert lst == expected @@ -183,12 +184,3 @@ return 5 res = self.interpret(fn, [1]) assert res == 20 - - - -class TestLLtype(BaseTestRrange, LLRtypeMixin): - from rpython.rtyper.lltypesystem import rrange - - -class TestOOtype(BaseTestRrange, OORtypeMixin): - from rpython.rtyper.ootypesystem import rrange diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.rstr import LLHelpers, STR from rpython.rtyper.rstr import AbstractLLHelpers from rpython.rtyper.rtyper import TyperError -from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin +from rpython.rtyper.test.tool import BaseRtypingTest def test_parse_fmt(): @@ -1073,7 +1073,7 @@ res = interpret(g, [-2]) assert res._obj.value == 42 -class BaseTestRstr(AbstractTestRstr): +class TestRstr(AbstractTestRstr): const = str constchar = chr @@ -1089,9 +1089,6 @@ for c in 
["a", "A", "1"]: assert self.interpret(fn, [ord(c)]) == c.upper() - -class TestLLtype(BaseTestRstr, LLRtypeMixin): From noreply at buildbot.pypy.org Tue Jul 9 03:00:36 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:36 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Fix rpython/rtyper/test/ Message-ID: <20130709010036.9E9781C0170@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65272:b41acf2f10bc Date: 2013-07-07 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b41acf2f10bc/ Log: Fix rpython/rtyper/test/ diff --git a/rpython/rtyper/test/test_llann.py b/rpython/rtyper/test/test_llann.py --- a/rpython/rtyper/test/test_llann.py +++ b/rpython/rtyper/test/test_llann.py @@ -7,7 +7,6 @@ cast_instance_to_base_ptr, cast_base_ptr_to_instance, base_ptr_lltype) from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem.lltype import * -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.rclass import fishllattr from rpython.rtyper.test.test_llinterp import interpret from rpython.translator.translator import TranslationContext @@ -463,34 +462,6 @@ res = interpret(h, [8, 5, 2]) assert res == 99 -def test_oohelper(): - S = ootype.Instance('S', ootype.ROOT, {'x': Signed, 'y': Signed}) - def f(s,z): - #assert we_are_translated() - return s.x*s.y+z - - def g(s): - #assert we_are_translated() - return s.x+s.y - - F = ootype.StaticMethod([S, Signed], Signed) - G = ootype.StaticMethod([S], Signed) - - def h(x, y, z): - s = ootype.new(S) - s.x = x - s.y = y - fsm = llhelper(F, f) - gsm = llhelper(G, g) - assert typeOf(fsm) == F - return fsm(s, z)+fsm(s, z*2)+gsm(s) - - res = h(8, 5, 2) - assert res == 99 - res = interpret(h, [8, 5, 2], type_system='ootype') - assert res == 99 - - def test_cast_instance_to_base_ptr(): class A: diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -26,14 +26,14 @@ -def timelog(prefix, call, *args, **kwds): +def timelog(prefix, call, *args, **kwds): #import time - #print prefix, "...", + #print prefix, "...", #start = time.time() - res = call(*args, **kwds) - #elapsed = time.time() - start + res = call(*args, **kwds) + #elapsed = time.time() - start #print "%.2f secs" % (elapsed,) - return res + return res def gengraph(func, argtypes=[], viewbefore='auto', policy=None, type_system="lltype", backendopt=False, config=None, @@ -49,9 +49,9 @@ t.view() global typer # we need it for find_exception typer = t.buildrtyper(type_system=type_system) - timelog("rtyper-specializing", typer.specialize) + timelog("rtyper-specializing", typer.specialize) #t.view() - timelog("checking graphs", t.checkgraphs) + timelog("checking graphs", t.checkgraphs) if backendopt: from rpython.translator.backendopt.all import backend_optimizations backend_optimizations(t) @@ -93,9 +93,9 @@ **extraconfigopts) interp = LLInterpreter(typer) _tcache[key] = (t, interp, graph) - # keep the cache small - _lastinterpreted.append(key) - if len(_lastinterpreted) >= 4: + # keep the cache small + _lastinterpreted.append(key) + if len(_lastinterpreted) >= 4: del _tcache[_lastinterpreted.pop(0)] if view == 'auto': view = getattr(option, 'view', False) @@ -164,8 +164,6 @@ assert res == 41 interpret_raises(IndexError, raise_exception, [42]) interpret_raises(ValueError, raise_exception, [43]) - interpret_raises(IndexError, raise_exception, [42], type_system="ootype") - interpret_raises(ValueError, 
raise_exception, [43], type_system="ootype") def test_call_raise(): res = interpret(call_raise, [41]) @@ -273,7 +271,7 @@ print res for i in range(3): assert res.ll_items()[i] == 3-i - + def test_list_pop(): def f(): l = [1,2,3] @@ -485,25 +483,6 @@ except ValueError: raise TypeError -def test_llinterp_fail(): - def aa(i): - if i: - raise TypeError() - - def bb(i): - try: - aa(i) - except TypeError: - pass - - t = TranslationContext() - annotator = t.buildannotator() - annotator.build_types(bb, [int]) - t.buildrtyper(type_system="ootype").specialize() - graph = graphof(t, bb) - interp = LLInterpreter(t.rtyper) - res = interp.eval_graph(graph, [1]) - def test_half_exceptiontransformed_graphs(): from rpython.translator import exceptiontransform def f1(x): @@ -581,7 +560,7 @@ free(t, flavor='raw') interpret(f, [1]) py.test.raises(leakfinder.MallocMismatch, "interpret(f, [0])") - + def f(): t1 = malloc(T, flavor='raw') t2 = malloc(T, flavor='raw') @@ -615,7 +594,7 @@ def test_scoped_allocator(): from rpython.rtyper.lltypesystem.lltype import scoped_alloc, Array, Signed T = Array(Signed) - + def f(): x = 0 with scoped_alloc(T, 1) as array: @@ -630,12 +609,12 @@ def external(): pass - + def raising(): raise OSError(15, "abcd") - + ext = register_external(external, [], llimpl=raising, llfakeimpl=raising) - + def f(): # this is a useful llfakeimpl that raises an exception try: diff --git a/rpython/rtyper/test/test_ootype_llinterp.py b/rpython/rtyper/test/test_ootype_llinterp.py deleted file mode 100644 --- a/rpython/rtyper/test/test_ootype_llinterp.py +++ /dev/null @@ -1,28 +0,0 @@ -from rpython.rtyper.ootypesystem.ootype import * -from rpython.rtyper.test.test_llinterp import interpret - -def test_simple_field(): - C = Instance("test", ROOT, {'a': (Signed, 3)}) - - def f(): - c = new(C) - c.a = 5 - return c.a - - result = interpret(f, [], type_system="ootype") - assert result == 5 - -def test_simple_method(): - C = Instance("test", ROOT, {'a': (Signed, 3)}) - M = Meth([], Signed) - def m_(self): - return self.a - m = meth(M, _name="m", _callable=m_) - addMethods(C, {"m": m}) - - def f(): - c = new(C) - return c.m() - - result = interpret(f, [], type_system="ootype") - assert result == 3 diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -115,21 +115,3 @@ assert rmodel.getgcflavor(DummyClsDescDef(A)) == 'gc' assert rmodel.getgcflavor(DummyClsDescDef(B)) == 'gc' assert rmodel.getgcflavor(DummyClsDescDef(R)) == 'raw' - -def test_missing_gvflavor_bug(): - class MyClass: - def set_x(self): - self.x = create_tuple() - def create_tuple(): - return MyClass(), 42 - def fn(): - obj = MyClass() - obj.set_x() - create_tuple() - t = TranslationContext() - t.buildannotator().build_types(fn, []) - t.buildrtyper(type_system='ootype').specialize() - #t.view() - t.checkgraphs() - graph = graphof(t, fn) - assert graph.getreturnvar().concretetype == Void From noreply at buildbot.pypy.org Tue Jul 9 03:00:37 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:37 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove CLI leftovers Message-ID: <20130709010037.CD46F1C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65273:96cd77ae4ae1 Date: 2013-07-07 19:35 +0200 http://bitbucket.org/pypy/pypy/changeset/96cd77ae4ae1/ Log: Remove CLI leftovers diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- 
a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -141,25 +141,3 @@ def _check_cbuilder(self, cbuilder): pass - -class CliCompiledMixin(BaseCompiledMixin): - type_system = 'ootype' - - def pre_translation_hook(self): - from rpython.translator.oosupport.support import patch_os - self.olddefs = patch_os() - - def post_translation_hook(self): - from rpython.translator.oosupport.support import unpatch_os - unpatch_os(self.olddefs) # restore original values - - def _compile_and_run(self, t, entry_point, entry_point_graph, args): - from rpython.translator.cli.test.runtest import compile_graph - func = compile_graph(entry_point_graph, t, nowrap=True, standalone=True) - return func(*args) - - def run_directly(self, fn, args): - from rpython.translator.cli.test.runtest import compile_function, get_annotation - ann = [get_annotation(x) for x in args] - clifunc = compile_function(fn, ann) - return clifunc(*args) diff --git a/rpython/rlib/parsing/test/test_tree.py b/rpython/rlib/parsing/test/test_tree.py --- a/rpython/rlib/parsing/test/test_tree.py +++ b/rpython/rlib/parsing/test/test_tree.py @@ -7,36 +7,36 @@ def test_nonterminal_simple(self): pos = SourcePos(1,2,3) - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[ - Symbol(symbol="b", - additional_info="b", + Symbol(symbol="b", + additional_info="b", token=Token(name="B", source="b", source_pos=pos))]) assert tree.getsourcepos() == pos - + def test_nonterminal_nested(self): pos = SourcePos(1,2,3) - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[ Nonterminal(symbol="c", children=[ - Symbol(symbol="b", - additional_info="b", + Symbol(symbol="b", + additional_info="b", token=Token(name="B", source="b", source_pos=pos))])]) assert tree.getsourcepos() == pos - + def test_nonterminal_simple_empty(self): - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[]) assert len(tree.children) == 0 # trivial py.test.raises(IndexError, tree.getsourcepos) def test_nonterminal_nested_empty(self): - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[Nonterminal(symbol="c", children=[Nonterminal(symbol="c", children=[Nonterminal(symbol="c", @@ -47,14 +47,14 @@ assert len(tree.children) != 0 # the not-so-trivial part. 
py.test.raises(IndexError, tree.getsourcepos) -class BaseTestTreeTranslated(object): - +class TestTreeTranslated(object): def compile(self, f): - raise NotImplementedError - + from rpython.translator.c.test.test_genc import compile + return compile(f, []) + def test_nonterminal_simple_empty(self): def foo(): - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[]) try: return tree.getsourcepos() @@ -65,7 +65,7 @@ def test_nonterminal_nested_empty(self): def foo(): - tree = Nonterminal(symbol="a", + tree = Nonterminal(symbol="a", children=[Nonterminal(symbol="c", children=[Nonterminal(symbol="c", children=[Nonterminal(symbol="c", @@ -79,18 +79,3 @@ return -42 f = self.compile(foo) assert f() == -42 - - -class TestTreeTranslatedLLType(BaseTestTreeTranslated): - - def compile(self, f): - from rpython.translator.c.test.test_genc import compile - return compile(f, []) - -class TestTreeTranslatedOOType(BaseTestTreeTranslated): - - def compile(self, f): - from rpython.translator.cli.test.runtest import compile_function - return compile_function(f, [], auto_raise_exc=True, exctrans=True) - - From noreply at buildbot.pypy.org Tue Jul 9 03:00:39 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:39 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove CLI tasks from translator driver Message-ID: <20130709010039.0BD581C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65274:d5d53228995b Date: 2013-07-07 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/d5d53228995b/ Log: Remove CLI tasks from translator driver diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -60,7 +60,7 @@ env = os.environ.copy() env['PYPY_INSTRUMENT_COUNTERS'] = str(self.datafile) self.compiler.platform.execute(exe, args, env=env) - + def after(self): # xxx os._exit(0) @@ -99,7 +99,7 @@ default_goal, = self.backend_select_goals([default_goal]) if default_goal in self._maybe_skip(): default_goal = None - + self.default_goal = default_goal self.extra_goals = [] self.exposed = [] @@ -142,7 +142,7 @@ def set_backend_extra_options(self, extra_options): self._backend_extra_options = extra_options - + def get_info(self): # XXX more? d = {'backend': self.config.translation.backend} return d @@ -163,7 +163,7 @@ new_goal = cand break else: - raise Exception, "cannot infer complete goal from: %r" % goal + raise Exception, "cannot infer complete goal from: %r" % goal l.append(new_goal) return l @@ -423,7 +423,7 @@ from rpython.translator.transform import insert_ll_stackcheck count = insert_ll_stackcheck(self.translator) self.log.info("inserted %d stack checks." % (count,)) - + def possibly_check_for_boehm(self): if self.config.translation.gc == "boehm": @@ -537,7 +537,7 @@ def task_llinterpret_lltype(self): from rpython.rtyper.llinterp import LLInterpreter py.log.setconsumer("llinterp operation", None) - + translator = self.translator interp = LLInterpreter(translator.rtyper) bk = translator.annotator.bookkeeper @@ -548,95 +548,6 @@ log.llinterpret.event("result -> %s" % v) - @taskdef(["?" 
+ OOBACKENDOPT, OOTYPE], 'Generating CLI source') - def task_source_cli(self): - from rpython.translator.cli.gencli import GenCli - from rpython.translator.cli.entrypoint import get_entrypoint - - if self.entry_point is not None: # executable mode - entry_point_graph = self.translator.graphs[0] - entry_point = get_entrypoint(entry_point_graph) - else: - # library mode - assert self.libdef is not None - bk = self.translator.annotator.bookkeeper - entry_point = self.libdef.get_entrypoint(bk) - - self.gen = GenCli(udir, self.translator, entry_point, config=self.config) - filename = self.gen.generate_source() - self.log.info("Wrote %s" % (filename,)) - - @taskdef(['source_cli'], 'Compiling CLI source') - def task_compile_cli(self): - from rpython.translator.oosupport.support import unpatch_os - from rpython.translator.cli.test.runtest import CliFunctionWrapper - filename = self.gen.build_exe() - self.c_entryp = CliFunctionWrapper(filename) - # restore original os values - if hasattr(self, 'old_cli_defs'): - unpatch_os(self.old_cli_defs) - - self.log.info("Compiled %s" % filename) - if self.standalone and self.exe_name: - self.copy_cli_exe() - - def copy_cli_exe(self): - # XXX messy - main_exe = self.c_entryp._exe - usession_path, main_exe_name = os.path.split(main_exe) - pypylib_dll = os.path.join(usession_path, 'pypylib.dll') - - basename = self.exe_name % self.get_info() - dirname = basename + '-data/' - if '/' not in dirname and '\\' not in dirname: - dirname = './' + dirname - - if not os.path.exists(dirname): - os.makedirs(dirname) - shutil.copy(main_exe, dirname) - shutil.copy(pypylib_dll, dirname) - if bool(os.getenv('PYPY_GENCLI_COPYIL')): - shutil.copy(os.path.join(usession_path, 'main.il'), dirname) - newexename = basename - f = file(newexename, 'w') - f.write(r"""#!/bin/bash -LEDIT=`type -p ledit` -EXE=`readlink $0` -if [ -z $EXE ] -then - EXE=$0 -fi -if uname -s | grep -iq Cygwin -then - MONO= -else - MONO=mono - # workaround for known mono buggy versions - VER=`mono -V | head -1 | sed s/'Mono JIT compiler version \(.*\) (.*'/'\1/'` - if [[ 2.1 < "$VER" && "$VER" < 2.4.3 ]] - then - MONO="mono -O=-branch" - fi -fi -$LEDIT $MONO "$(dirname $EXE)/$(basename $EXE)-data/%s" "$@" # XXX doesn't work if it's placed in PATH -""" % main_exe_name) - f.close() - os.chmod(newexename, 0755) - - def copy_cli_dll(self): - dllname = self.gen.outfile - usession_path, dll_name = os.path.split(dllname) - pypylib_dll = os.path.join(usession_path, 'pypylib.dll') - shutil.copy(dllname, '.') - shutil.copy(pypylib_dll, '.') - - # main.exe is a stub but is needed right now because it's - # referenced by pypylib.dll. Will be removed in the future - translator_path, _ = os.path.split(__file__) - main_exe = os.path.join(translator_path, 'cli/src/main.exe') - shutil.copy(main_exe, '.') - self.log.info("Copied to %s" % os.path.join(os.getcwd(), dllname)) - @taskdef(["?" 
+ OOBACKENDOPT, OOTYPE], 'Generating JVM source') def task_source_jvm(self): from rpython.translator.jvm.genjvm import GenJvm @@ -747,7 +658,7 @@ if backend in ('cli', 'jvm'): from rpython.translator.oosupport.support import patch_os driver.old_cli_defs = patch_os() - + target = targetspec_dic['target'] spec = target(driver, args) @@ -757,8 +668,8 @@ entry_point, inputtypes = spec policy = None - driver.setup(entry_point, inputtypes, - policy=policy, + driver.setup(entry_point, inputtypes, + policy=policy, extra=targetspec_dic, empty_translator=empty_translator) @@ -770,7 +681,7 @@ assert 'rpython.rtyper.rmodel' not in sys.modules, ( "cannot fork because the rtyper has already been imported") prereq_checkpt_rtype_lltype = prereq_checkpt_rtype - prereq_checkpt_rtype_ootype = prereq_checkpt_rtype + prereq_checkpt_rtype_ootype = prereq_checkpt_rtype # checkpointing support def _event(self, kind, goal, func): From noreply at buildbot.pypy.org Tue Jul 9 03:00:40 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:40 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove OOJitMixin Message-ID: <20130709010040.4A2E51C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65275:7ae668fca267 Date: 2013-07-07 20:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7ae668fca267/ Log: Remove OOJitMixin diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -263,39 +263,6 @@ NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed), ('next', lltype.Ptr(NODE)))) return NODE - -class OOJitMixin(JitMixin): - type_system = 'ootype' - #CPUClass = runner.OOtypeCPU - - def setup_class(cls): - py.test.skip("ootype tests skipped for now") - - @staticmethod - def Ptr(T): - return T - - @staticmethod - def GcStruct(name, *fields, **kwds): - if 'hints' in kwds: - kwds['_hints'] = kwds['hints'] - del kwds['hints'] - I = ootype.Instance(name, ootype.ROOT, dict(fields), **kwds) - return I - - malloc = staticmethod(ootype.new) - nullptr = staticmethod(ootype.null) - - @staticmethod - def malloc_immortal(T): - return ootype.new(T) - - def _get_NODE(self): - NODE = ootype.Instance('NODE', ootype.ROOT, {}) - NODE._add_fields({'value': ootype.Signed, - 'next': NODE}) - return NODE - # ____________________________________________________________ class _Foo: diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp import history -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst +from rpython.jit.metainterp.test.support import LLJitMixin, noConst from rpython.jit.metainterp.warmspot import get_stats from rpython.rlib import rerased from rpython.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside, @@ -3045,86 +3045,6 @@ i += 1 res = self.meta_interp(f, [32]) assert res == f(32) - -class XXXDisabledTestOOtype(BasicTests, OOJitMixin): - - def test_oohash(self): - def f(n): - s = ootype.oostring(n, -1) - return s.ll_hash() - res = self.interp_operations(f, [5]) - assert res == ootype.oostring(5, -1).ll_hash() - - def test_identityhash(self): - A = ootype.Instance("A", ootype.ROOT) - def f(): - obj1 = ootype.new(A) - obj2 = ootype.new(A) - return 
ootype.identityhash(obj1) == ootype.identityhash(obj2) - assert not f() - res = self.interp_operations(f, []) - assert not res - - def test_oois(self): - A = ootype.Instance("A", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - if n: - obj2 = obj1 - else: - obj2 = ootype.new(A) - return obj1 is obj2 - res = self.interp_operations(f, [0]) - assert not res - res = self.interp_operations(f, [1]) - assert res - - def test_oostring_instance(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", ootype.ROOT) - def f(n): - obj1 = ootype.new(A) - obj2 = ootype.new(B) - s1 = ootype.oostring(obj1, -1) - s2 = ootype.oostring(obj2, -1) - ch1 = s1.ll_stritem_nonneg(1) - ch2 = s2.ll_stritem_nonneg(1) - return ord(ch1) + ord(ch2) - res = self.interp_operations(f, [0]) - assert res == ord('A') + ord('B') - - def test_subclassof(self): - A = ootype.Instance("A", ootype.ROOT) - B = ootype.Instance("B", A) - clsA = ootype.runtimeClass(A) - clsB = ootype.runtimeClass(B) - myjitdriver = JitDriver(greens = [], reds = ['n', 'flag', 'res']) - - def getcls(flag): - if flag: - return clsA - else: - return clsB - - def f(flag, n): - res = True - while n > -100: - myjitdriver.can_enter_jit(n=n, flag=flag, res=res) - myjitdriver.jit_merge_point(n=n, flag=flag, res=res) - cls = getcls(flag) - n -= 1 - res = ootype.subclassof(cls, clsB) - return res - - res = self.meta_interp(f, [1, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert not res - - res = self.meta_interp(f, [0, 100], - policy=StopAtXPolicy(getcls), - enable_opts='') - assert res class BaseLLtypeTests(BasicTests): @@ -3959,7 +3879,7 @@ def test_weakref(self): import weakref - + class A(object): def __init__(self, x): self.x = x @@ -3982,7 +3902,7 @@ def test_external_call(self): from rpython.rlib.objectmodel import invoke_around_extcall - + T = rffi.CArrayPtr(rffi.TIME_T) external = rffi.llexternal("time", [T], rffi.TIME_T) diff --git a/rpython/jit/metainterp/test/test_blackhole.py b/rpython/jit/metainterp/test/test_blackhole.py --- a/rpython/jit/metainterp/test/test_blackhole.py +++ b/rpython/jit/metainterp/test/test_blackhole.py @@ -1,6 +1,6 @@ import py from rpython.rlib.jit import JitDriver -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder from rpython.jit.metainterp.blackhole import BlackholeInterpreter from rpython.jit.metainterp.blackhole import convert_and_run_from_pyjitpl diff --git a/rpython/jit/metainterp/test/test_del.py b/rpython/jit/metainterp/test/test_del.py --- a/rpython/jit/metainterp/test/test_del.py +++ b/rpython/jit/metainterp/test/test_del.py @@ -2,7 +2,7 @@ from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib import rgc -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin class DelTests: @@ -125,8 +125,3 @@ class TestLLtype(DelTests, LLJitMixin): pass - -class TestOOtype(DelTests, OOJitMixin): - def setup_class(cls): - py.test.skip("XXX dels are not implemented in the" - " static CLI or JVM backend") diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -1,5 +1,5 @@ import py -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from 
rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver from rpython.rlib import objectmodel @@ -163,7 +163,7 @@ def test_unrolling_of_dict_iter(self): driver = JitDriver(greens = [], reds = ['n']) - + def f(n): while n > 0: driver.jit_merge_point(n=n) @@ -178,8 +178,5 @@ 'jump': 1}) -class TestOOtype(DictTests, OOJitMixin): - pass - class TestLLtype(DictTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py --- a/rpython/jit/metainterp/test/test_exception.py +++ b/rpython/jit/metainterp/test/test_exception.py @@ -1,5 +1,5 @@ import py, sys -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from rpython.jit.codewriter.policy import StopAtXPolicy @@ -43,7 +43,7 @@ def check(n): if n % 2: raise ValueError - + def f(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -65,7 +65,7 @@ def check(n): if n % 2 == 0: raise ValueError - + def f(n): while n > 0: myjitdriver.can_enter_jit(n=n) @@ -116,7 +116,7 @@ return e.n def f(n): return a(n) - + res = self.interp_operations(f, [-4]) assert res == -40 @@ -476,7 +476,7 @@ class SomeException(Exception): pass - + def portal(n): while n > 0: jitdriver.can_enter_jit(n=n) @@ -525,7 +525,7 @@ def x(n): if n == 1: raise MyError(n) - + def f(n): try: while n > 0: @@ -535,7 +535,7 @@ n -= 1 except MyError: z() - + def z(): raise ValueError @@ -617,8 +617,5 @@ self.n = n -class TestOOtype(ExceptionTests, OOJitMixin): - pass - class TestLLtype(ExceptionTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_float.py b/rpython/jit/metainterp/test/test_float.py --- a/rpython/jit/metainterp/test/test_float.py +++ b/rpython/jit/metainterp/test/test_float.py @@ -1,5 +1,5 @@ import math, sys -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.rarithmetic import intmask, r_uint @@ -75,8 +75,5 @@ assert type(res) is float and res == float(long(r_uint(-12345))) -class TestOOtype(FloatTests, OOJitMixin): - pass - class TestLLtype(FloatTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_greenfield.py b/rpython/jit/metainterp/test/test_greenfield.py --- a/rpython/jit/metainterp/test/test_greenfield.py +++ b/rpython/jit/metainterp/test/test_greenfield.py @@ -1,4 +1,4 @@ -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver @@ -55,6 +55,3 @@ class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): pass - -class TestOOtypeGreenFieldsTests(GreenFieldsTests, OOJitMixin): - pass diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -1,5 +1,5 @@ from rpython.rlib import jit -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin @jit.dont_look_inside def escape(x): @@ -177,6 +177,3 @@ class TestLLtypeImmutableFieldsTests(ImmutableFieldsTests, LLJitMixin): pass - -class TestOOtypeImmutableFieldsTests(ImmutableFieldsTests, OOJitMixin): - pass diff --git 
a/rpython/jit/metainterp/test/test_jitdriver.py b/rpython/jit/metainterp/test/test_jitdriver.py --- a/rpython/jit/metainterp/test/test_jitdriver.py +++ b/rpython/jit/metainterp/test/test_jitdriver.py @@ -1,6 +1,6 @@ """Tests for multiple JitDrivers.""" from rpython.rlib.jit import JitDriver, unroll_safe, set_param -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import get_stats @@ -145,6 +145,3 @@ class TestLLtype(MultipleJitDriversTests, LLJitMixin): pass - -class TestOOtype(MultipleJitDriversTests, OOJitMixin): - pass diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -1,7 +1,7 @@ import py from rpython.rlib.objectmodel import newlist_hint from rpython.rlib.jit import JitDriver, promote -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin class ListTests: @@ -288,9 +288,6 @@ self.check_resops(call=0) -class TestOOtype(ListTests, OOJitMixin): - pass - class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): class A(object): diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -2,16 +2,16 @@ from rpython.rlib.jit import JitDriver, hint, set_param from rpython.rlib.objectmodel import compute_hash from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp import history class LoopTest(object): enable_opts = '' - + automatic_promotion_result = { - 'int_add' : 6, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, + 'int_add' : 6, 'int_gt' : 1, 'guard_false' : 1, 'jump' : 1, 'guard_value' : 3 } @@ -43,7 +43,7 @@ class A(object): def __init__(self): self.x = 3 - + def f(x, y): res = 0 a = A() @@ -70,7 +70,7 @@ def l(y, x, t): llop.debug_print(lltype.Void, y, x, t) - + def g(y, x, r): if y <= 12: res = x - 2 @@ -241,7 +241,7 @@ def can_enter_jit(i, x, node): myjitdriver.can_enter_jit(i=i, x=x, node=node) - + def f(node): x = 0 i = 0 @@ -362,16 +362,16 @@ # # i = j = x = 0 # while i < n: - # j = 0 + # j = 0 # while j <= i: # j = j + 1 # x = x + (i&j) # i = i + 1 - + myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x']) bytecode = "IzJxji" def f(n, threshold): - set_param(myjitdriver, 'threshold', threshold) + set_param(myjitdriver, 'threshold', threshold) i = j = x = 0 pos = 0 op = '-' @@ -418,7 +418,7 @@ myjitdriver = JitDriver(greens = ['pos'], reds = ['i', 'j', 'n', 'x']) bytecode = "IzJxji" def f(nval, threshold): - set_param(myjitdriver, 'threshold', threshold) + set_param(myjitdriver, 'threshold', threshold) i, j, x = A(0), A(0), A(0) n = A(nval) pos = 0 @@ -477,30 +477,30 @@ if not (i < n): pos += 2 elif op == '7': - if s==1: + if s==1: x = x + 7 else: x = x + 2 elif op == '8': - if s==1: + if s==1: x = x + 8 else: x = x + 3 pos += 1 return x - + def g(n, s): sa = 0 for i in range(7): sa += f(n, s) return sa - assert self.meta_interp(g, [25, 1]) == 7 * 25 * (7 + 8) + assert self.meta_interp(g, [25, 
1]) == 7 * 25 * (7 + 8) def h(n): return g(n, 1) + g(n, 2) - assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) - + assert self.meta_interp(h, [25]) == 7 * 25 * (7 + 8 + 2 + 3) + def test_three_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x']) bytecode = ".+357" @@ -599,7 +599,7 @@ z = Z(z.elem + 1) x -= 1 return z.elem - + expected = f(100, 5) res = self.meta_interp(f, [100, 5], policy=StopAtXPolicy(externfn)) assert res == expected @@ -617,16 +617,16 @@ CO_INCREASE = 0 CO_JUMP_BACK_3 = 1 CO_DECREASE = 2 - + code = [CO_INCREASE, CO_INCREASE, CO_INCREASE, CO_JUMP_BACK_3, CO_INCREASE, CO_DECREASE] - + def add(res, a): return res + a def sub(res, a): return res - a - + def main_interpreter_loop(a): i = 0 res = 0 @@ -663,16 +663,16 @@ reds = ['res', 'a']) CO_INCREASE = 0 CO_JUMP_BACK_3 = 1 - + code = [CO_INCREASE, CO_INCREASE, CO_INCREASE, CO_JUMP_BACK_3, CO_INCREASE] - + def add(res, a): return res + a def sub(res, a): return res - a - + def main_interpreter_loop(a): i = 0 res = 0 @@ -729,7 +729,7 @@ def __init__(self, lst): self.lst = lst codes = [Code([]), Code([0, 0, 1, 1])] - + def interpret(num): code = codes[num] p = 0 @@ -810,7 +810,7 @@ some_fn(Stuff(n), k, z) return 0 - res = self.meta_interp(f, [200]) + res = self.meta_interp(f, [200]) def test_regular_pointers_in_short_preamble(self): from rpython.rtyper.lltypesystem import lltype @@ -908,8 +908,5 @@ res = self.meta_interp(f, [20, 10]) assert res == f(20, 10) -class TestOOtype(LoopTest, OOJitMixin): - pass - class TestLLtype(LoopTest, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_loop_unroll.py b/rpython/jit/metainterp/test/test_loop_unroll.py --- a/rpython/jit/metainterp/test/test_loop_unroll.py +++ b/rpython/jit/metainterp/test/test_loop_unroll.py @@ -1,22 +1,18 @@ import py from rpython.rlib.jit import JitDriver from rpython.jit.metainterp.test import test_loop -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES class LoopUnrollTest(test_loop.LoopTest): enable_opts = ALL_OPTS_NAMES - + automatic_promotion_result = { 'int_gt': 2, 'guard_false': 2, 'jump': 1, 'int_add': 6, - 'guard_value': 1 + 'guard_value': 1 } # ====> test_loop.py class TestLLtype(LoopUnrollTest, LLJitMixin): pass - -class TestOOtype(LoopUnrollTest, OOJitMixin): - pass - diff --git a/rpython/jit/metainterp/test/test_loop_unroll_disopt.py b/rpython/jit/metainterp/test/test_loop_unroll_disopt.py --- a/rpython/jit/metainterp/test/test_loop_unroll_disopt.py +++ b/rpython/jit/metainterp/test/test_loop_unroll_disopt.py @@ -1,7 +1,7 @@ import py from rpython.rlib.jit import JitDriver from rpython.jit.metainterp.test import test_loop -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES allopts = ALL_OPTS_NAMES.split(':') @@ -22,4 +22,4 @@ del TestLLtype # No need to run the last set twice del TestLoopNoUnrollLLtype # This case is take care of by test_loop - + diff --git a/rpython/jit/metainterp/test/test_math.py b/rpython/jit/metainterp/test/test_math.py --- a/rpython/jit/metainterp/test/test_math.py +++ b/rpython/jit/metainterp/test/test_math.py @@ -1,16 +1,16 @@ import math -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.rfloat import 
isinf, isnan, INFINITY, NAN class MathTests: - + def test_math_sqrt(self): def f(x): try: return math.sqrt(x) except ValueError: return -INFINITY - + res = self.interp_operations(f, [0.0]) assert res == 0.0 self.check_operations_history(call_pure=1) @@ -40,8 +40,5 @@ self.check_operations_history(call_pure=0) -class TestOOtype(MathTests, OOJitMixin): - pass - class TestLLtype(MathTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -3,7 +3,7 @@ from rpython.rlib.jit import unroll_safe, dont_look_inside, promote from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import fatalerror -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.rtyper.annlowlevel import hlstr from rpython.jit.metainterp.warmspot import get_stats @@ -33,7 +33,7 @@ myjitdriver = JitDriver(greens=[], reds=['n', 'm']) class Error(Exception): pass - + def f(n): m = n - 2 while True: @@ -246,7 +246,7 @@ class Exc(Exception): pass - + def f(code, n): pc = 0 while pc < len(code): @@ -286,7 +286,7 @@ return "%s %d %s" % (code, pc, code[pc]) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], get_printable_location=p) - + def f(code, n): pc = 0 while pc < len(code): @@ -309,7 +309,7 @@ return n def main(n): set_param(None, 'threshold', 3) - set_param(None, 'trace_eagerness', 5) + set_param(None, 'trace_eagerness', 5) return f("c-l", n) expected = main(100) res = self.meta_interp(main, [100], enable_opts='', inline=True) @@ -328,7 +328,7 @@ if n > 0: return recursive(n - 1) + 1 return 0 - def loop(n): + def loop(n): set_param(myjitdriver, "threshold", 10) pc = 0 while n: @@ -411,8 +411,8 @@ self.i7 = i7 self.i8 = i8 self.i9 = i9 - - + + def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -444,8 +444,8 @@ self.i7 = i7 self.i8 = i8 self.i9 = i9 - - + + def loop(n): i = 0 o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) @@ -467,7 +467,7 @@ res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT, listops=True) assert not res - self.check_aborted_count(5) + self.check_aborted_count(5) def test_set_param_inlining(self): myjitdriver = JitDriver(greens=[], reds=['n', 'recurse']) @@ -480,7 +480,7 @@ myjitdriver.can_enter_jit(n=n, recurse=recurse) return n TRACE_LIMIT = 66 - + def main(inline): set_param(None, "threshold", 10) set_param(None, 'function_threshold', 60) @@ -502,7 +502,7 @@ return "'%s' at %d: %s" % (code, pc, code[pc]) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], get_printable_location=p) - + def f(code, n): pc = 0 while pc < len(code): @@ -543,7 +543,7 @@ return "%s %d %s" % (code, pc, code[pc]) myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'], get_printable_location=p) - + def f(code, n): pc = 0 while pc < len(code): @@ -576,7 +576,7 @@ result += f('-c-----------l-', i+100) self.meta_interp(g, [10], backendopt=True) self.check_aborted_count(1) - self.check_resops(call=0, call_assembler=2) + self.check_resops(call=0, call_assembler=2) self.check_jitcell_token_count(2) def test_directly_call_assembler(self): @@ -717,7 +717,7 @@ class MyException(Exception): def __init__(self, x): self.x = x - + driver = JitDriver(greens = ['codeno'], reds = ['i'], get_printable_location = lambda codeno : str(codeno)) @@ -736,7 +736,7 @@ raise 
MyException(1) self.meta_interp(portal, [2], inline=True) - self.check_history(call_assembler=1) + self.check_history(call_assembler=1) def test_directly_call_assembler_fail_guard(self): driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'], @@ -765,10 +765,10 @@ class Thing(object): def __init__(self, val): self.val = val - + class Frame(object): _virtualizable2_ = ['thing'] - + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -803,10 +803,10 @@ class Thing(object): def __init__(self, val): self.val = val - + class Frame(object): _virtualizable2_ = ['thing'] - + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -854,10 +854,10 @@ class Thing(object): def __init__(self, val): self.val = val - + class Frame(object): _virtualizable2_ = ['thing'] - + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -918,7 +918,7 @@ def main(codeno, n, a): frame = Frame([a, a+1, a+2, a+3], 0) return f(codeno, n, a, frame) - + def f(codeno, n, a, frame): x = 0 while n > 0: @@ -948,10 +948,10 @@ class Thing(object): def __init__(self, val): self.val = val - + class Frame(object): _virtualizable2_ = ['thing'] - + driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'], virtualizables = ['frame'], get_printable_location = lambda codeno : str(codeno)) @@ -1195,7 +1195,7 @@ def test_trace_from_start_always(self): from rpython.rlib.nonconst import NonConstant - + driver = JitDriver(greens = ['c'], reds = ['i', 'v']) def portal(c, i, v): @@ -1220,7 +1220,7 @@ def test_trace_from_start_does_not_prevent_inlining(self): driver = JitDriver(greens = ['c', 'bc'], reds = ['i']) - + def portal(bc, c, i): while True: driver.jit_merge_point(c=c, bc=bc, i=i) @@ -1229,7 +1229,7 @@ c += 1 else: return - if c == 10: # bc == 0 + if c == 10: # bc == 0 c = 0 if i >= 100: return @@ -1266,6 +1266,3 @@ class TestLLtype(RecursiveTests, LLJitMixin): pass - -class TestOOtype(RecursiveTests, OOJitMixin): - pass diff --git a/rpython/jit/metainterp/test/test_send.py b/rpython/jit/metainterp/test/test_send.py --- a/rpython/jit/metainterp/test/test_send.py +++ b/rpython/jit/metainterp/test/test_send.py @@ -1,10 +1,10 @@ import py from rpython.rlib.jit import JitDriver, promote, elidable, set_param from rpython.jit.codewriter.policy import StopAtXPolicy -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin class SendTests(object): - + def test_green_send(self): myjitdriver = JitDriver(greens = ['i'], reds = ['counter']) lst = ["123", "45"] @@ -149,7 +149,7 @@ def f(y): while y > 0: myjitdriver.can_enter_jit(y=y) - myjitdriver.jit_merge_point(y=y) + myjitdriver.jit_merge_point(y=y) w = externfn(y) w.foo() y -= 1 @@ -343,8 +343,8 @@ policy=StopAtXPolicy(State.externfn.im_func)) assert res == f(198) # we get two TargetTokens, one for the loop and one for the preamble - self.check_jitcell_token_count(1) - self.check_target_token_count(2) + self.check_jitcell_token_count(1) + self.check_target_token_count(2) def test_indirect_call_unknown_object_3(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'z', 'state']) @@ -436,7 +436,7 @@ # and 1 bridge going from the # loop back to the loop self.check_trace_count(2) # preamble/loop and 1 bridge - 
self.check_jitcell_token_count(1) + self.check_jitcell_token_count(1) self.check_target_token_count(3) # preamble, Int1, Int2 self.check_aborted_count(0) @@ -521,7 +521,7 @@ def test_recursive_call_to_portal_from_blackhole(self): from rpython.rtyper.annlowlevel import hlstr - + myjitdriver = JitDriver(greens = ['k'], reds = ['n']) def f(n, k): while n >= 0: @@ -634,9 +634,5 @@ res = self.meta_interp(fn, [20], policy=StopAtXPolicy(extern)) assert res == 21 - -class TestOOtype(SendTests, OOJitMixin): - pass - class TestLLtype(SendTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_slist.py b/rpython/jit/metainterp/test/test_slist.py --- a/rpython/jit/metainterp/test/test_slist.py +++ b/rpython/jit/metainterp/test/test_slist.py @@ -1,5 +1,5 @@ import py -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver class ListTests(object): @@ -94,9 +94,6 @@ assert res == 41 self.check_resops(call=0, guard_value=0) -# we don't support resizable lists on ootype -#class TestOOtype(ListTests, OOJitMixin): -# pass class TestLLtype(ListTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_string.py b/rpython/jit/metainterp/test/test_string.py --- a/rpython/jit/metainterp/test/test_string.py +++ b/rpython/jit/metainterp/test/test_string.py @@ -603,9 +603,6 @@ }) -#class TestOOtype(StringTests, OOJitMixin): -# CALL = "oosend" -# CALL_PURE = "oosend_pure" class TestLLtype(StringTests, LLJitMixin): CALL = "call" diff --git a/rpython/jit/metainterp/test/test_tl.py b/rpython/jit/metainterp/test/test_tl.py --- a/rpython/jit/metainterp/test/test_tl.py +++ b/rpython/jit/metainterp/test/test_tl.py @@ -1,6 +1,6 @@ import py from rpython.jit.codewriter.policy import StopAtXPolicy -from rpython.jit.metainterp.test.support import OOJitMixin, LLJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin class ToyLanguageTests: @@ -141,8 +141,5 @@ meth_func = meth.im_func del meth_func._jit_look_inside_ -class TestOOtype(ToyLanguageTests, OOJitMixin): - pass - class TestLLtype(ToyLanguageTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_tlc.py b/rpython/jit/metainterp/test/test_tlc.py --- a/rpython/jit/metainterp/test/test_tlc.py +++ b/rpython/jit/metainterp/test/test_tlc.py @@ -3,7 +3,7 @@ from rpython.jit.tl import tlc -from rpython.jit.metainterp.test.support import OOJitMixin, LLJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin class TLCTests: diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -2,7 +2,7 @@ from rpython.rlib.jit import JitDriver, promote, dont_look_inside from rpython.rlib.objectmodel import compute_unique_id from rpython.jit.codewriter.policy import StopAtXPolicy -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rclass, rffi from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.ootypesystem import ootype @@ -189,7 +189,7 @@ self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=2, new=0) - + def test_two_loops_with_escaping_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) def externfn(node): @@ -296,7 +296,7 @@ class StuffList(object): _immutable_ = True - + def f(n, 
a, i): stufflist = StuffList() stufflist.lst = [Stuff(a), Stuff(3)] @@ -339,7 +339,7 @@ res = self.meta_interp(f, [10], policy=StopAtXPolicy(g)) assert res == 3 - self.check_resops(**{self._new_op: 1}) + self.check_resops(**{self._new_op: 1}) def test_virtual_on_virtual(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'parent']) @@ -351,7 +351,7 @@ class SubNode(object): def __init__(self, f): self.f = f - + def f(n): subnode = self._new() subnode.value = 3 @@ -439,7 +439,7 @@ n -= 1 return sa assert self.meta_interp(f, [30]) == f(30) - + def test_constant_virtual2(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'node']) def f(n): @@ -461,7 +461,7 @@ n -= 1 return sa assert self.meta_interp(f, [31]) == f(31) - + def test_stored_reference_with_bridge1(self): class RefNode(object): def __init__(self, ref): @@ -711,7 +711,7 @@ assert self.meta_interp(f, [40]) == f(40) def FIXME_why_does_this_force(self): - mydriver = JitDriver(reds = ['i', 'j'], greens = []) + mydriver = JitDriver(reds = ['i', 'j'], greens = []) def f(): i = self._new() i.value = 0 @@ -727,7 +727,7 @@ assert self.meta_interp(f, []) == 20 def FIXME_why_does_this_force2(self): - mydriver = JitDriver(reds = ['i', 'j'], greens = []) + mydriver = JitDriver(reds = ['i', 'j'], greens = []) def f(): i = self._new() i.value = 0 @@ -745,7 +745,7 @@ i = j return i.value + j.value assert self.meta_interp(f, []) == 20 - + def test_virtual_skipped_by_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'm', 'i', 'x']) def f(n, m): @@ -778,12 +778,12 @@ node1 = next node2 = next n -= 1 - return node1.value + return node1.value res = self.meta_interp(f, [10]) assert res == f(10) self.check_resops(new_with_vtable=0, new=0) - + def test_retrace_not_matching_bridge(self): @dont_look_inside @@ -817,7 +817,7 @@ node2 = node2.new() i += 1 - return node.value + return node.value res = self.meta_interp(f, [10], repeat=10) assert res == f(10) self.check_resops(jump=2) @@ -855,7 +855,7 @@ node2 = node2.new() node.value += len(s) i += 1 - return node.value + return node.value res = self.meta_interp(f, [10], repeat=10) assert res == f(10) self.check_resops(jump=2) @@ -1018,12 +1018,12 @@ assert r == expected def test_arraycopy_disappears(self): - mydriver = JitDriver(reds = ['i'], greens = []) + mydriver = JitDriver(reds = ['i'], greens = []) def f(): i = 0 while i < 10: mydriver.can_enter_jit(i=i) - mydriver.jit_merge_point(i=i) + mydriver.jit_merge_point(i=i) t = (1, 2, 3, i + 1) t2 = t[:] del t @@ -1039,7 +1039,7 @@ class A(object): def __init__(self, state): self.state = state - + def f(): i = 0 s = 10000 @@ -1227,7 +1227,7 @@ class TestLLtype_Instance(VirtualTests, LLJitMixin): _new_op = 'new_with_vtable' _field_prefix = 'inst_' - + @staticmethod def _new(): return MyClass() @@ -1261,16 +1261,6 @@ self.check_resops(new_with_vtable=0, setfield_gc=0, getfield_gc=0, new=0) -class TestOOtype_Instance(VirtualTests, OOJitMixin): - _new_op = 'new_with_vtable' - _field_prefix = 'o' - - @staticmethod - def _new(): - return MyClass() - - test_class_with_default_fields = TestLLtype_Instance.test_class_with_default_fields.im_func - # ____________________________________________________________ # Run 2: all the tests use lltype.malloc to make a NODE @@ -1281,7 +1271,7 @@ class TestLLtype_NotObject(VirtualTests, LLJitMixin): _new_op = 'new' _field_prefix = '' - + @staticmethod def _new(): return lltype.malloc(NODE) @@ -1294,14 +1284,6 @@ 'floatval' : ootype.Float, 'extra': ootype.Signed}) -class 
TestOOtype_NotObject(VirtualTests, OOJitMixin): - _new_op = 'new_with_vtable' - _field_prefix = '' - - @staticmethod - def _new(): - return ootype.new(OONODE) - # ____________________________________________________________ # Run 3: all the tests use lltype.malloc to make a NODE2 # (same as Run 2 but it is part of the OBJECT hierarchy) @@ -1317,7 +1299,7 @@ class TestLLtype_Object(VirtualTests, LLJitMixin): _new_op = 'new_with_vtable' _field_prefix = '' - + @staticmethod def _new(): p = lltype.malloc(NODE2) @@ -1326,8 +1308,5 @@ # misc -class TestOOTypeMisc(VirtualMiscTests, OOJitMixin): - pass - class TestLLTypeMisc(VirtualMiscTests, LLJitMixin): pass diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -3,7 +3,7 @@ from rpython.jit.codewriter import heaptracker from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import get_translator from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask @@ -1470,12 +1470,6 @@ }) -class TestOOtype(#ExplicitVirtualizableTests, - ImplicitVirtualizableTests, - OOJitMixin): - pass - - class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -4,7 +4,7 @@ from rpython.rlib.jit import JitDriver, set_param, unroll_safe, jit_callback from rpython.jit.backend.llgraph import runner -from rpython.jit.metainterp.test.support import LLJitMixin, OOJitMixin +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES @@ -502,13 +502,13 @@ # of W_InterpIterable), but we need to put it in a try/except block. 
# With the first "inline_in_portal" approach, this case crashed myjitdriver = JitDriver(greens = [], reds = 'auto') - + def inc(x, n): if x == n: raise OverflowError return x+1 inc._dont_inline_ = True - + class MyRange(object): def __init__(self, n): self.cur = 0 @@ -563,10 +563,6 @@ CPUClass = runner.LLGraphCPU type_system = 'lltype' -class TestOOWarmspot(WarmspotTests, OOJitMixin): - ##CPUClass = runner.OOtypeCPU - type_system = 'ootype' - class TestWarmspotDirect(object): def setup_class(cls): from rpython.jit.metainterp.typesystem import llhelper From noreply at buildbot.pypy.org Tue Jul 9 03:00:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:41 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: remove more CLI left-overs Message-ID: <20130709010041.7304A1C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65276:8c7a3e2b1d57 Date: 2013-07-07 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/8c7a3e2b1d57/ Log: remove more CLI left-overs diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -99,8 +99,6 @@ return "rpython.jit.backend.x86.runner", "CPU386_NO_SSE2" elif backend_name == MODEL_X86_64: return "rpython.jit.backend.x86.runner", "CPU_X86_64" - #elif backend_name == 'cli': - # return "rpython.jit.backend.cli.runner", "CliCPU" elif backend_name == MODEL_ARM: return "rpython.jit.backend.arm.runner", "CPU_ARM" else: diff --git a/rpython/rtyper/test/test_rfloat.py b/rpython/rtyper/test/test_rfloat.py --- a/rpython/rtyper/test/test_rfloat.py +++ b/rpython/rtyper/test/test_rfloat.py @@ -178,11 +178,7 @@ n1 = x * x n2 = y * y * y return rfloat.isnan(n1 / n2) - if self.__class__.__name__ != 'TestCliFloat': - # the next line currently fails on mono 2.6.7 (ubuntu 11.04), see: - # https://bugzilla.novell.com/show_bug.cgi?id=692493 - assert self.interpret(fn, [1e200, 1e200]) # nan - # + assert self.interpret(fn, [1e200, 1e200]) # nan assert not self.interpret(fn, [1e200, 1.0]) # +inf assert not self.interpret(fn, [1e200, -1.0]) # -inf assert not self.interpret(fn, [42.5, 2.3]) # +finite @@ -210,11 +206,7 @@ assert self.interpret(fn, [42.5, -2.3]) # -finite assert not self.interpret(fn, [1e200, 1.0]) # +inf assert not self.interpret(fn, [1e200, -1.0]) # -inf - # - if self.__class__.__name__ != 'TestCliFloat': - # the next line currently fails on mono 2.6.7 (ubuntu 11.04), see: - # https://bugzilla.novell.com/show_bug.cgi?id=692493 - assert not self.interpret(fn, [1e200, 1e200]) # nan + assert not self.interpret(fn, [1e200, 1e200]) # nan def test_formatd(self): from rpython.rlib.rfloat import formatd diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -128,17 +128,6 @@ self.driver.compile_c() return self.driver.c_entryp - def compile_cli(self, **kwds): - self.update_options(kwds) - self.ensure_backend('cli') - self.driver.compile_cli() - return self.driver.c_entryp - - def source_cli(self, **kwds): - self.update_options(kwds) - self.ensure_backend('cli') - self.driver.source_cli() - def compile_jvm(self, **kwds): self.update_options(kwds) self.ensure_backend('jvm') From noreply at buildbot.pypy.org Tue Jul 9 03:00:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:42 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: remove all ootype tasks from the translation driver 
Message-ID: <20130709010042.9D2401C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65277:c7d65a6a3fa0 Date: 2013-07-07 21:29 +0200 http://bitbucket.org/pypy/pypy/changeset/c7d65a6a3fa0/ Log: remove all ootype tasks from the translation driver diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -350,15 +350,6 @@ rtyper = self.translator.buildrtyper(type_system='lltype') rtyper.specialize(dont_simplify_again=True) - OOTYPE = 'rtype_ootype' - @taskdef(['annotate'], "ootyping") - def task_rtype_ootype(self): - """ RTyping - ootype version - """ - # Maybe type_system should simply be an option used in task_rtype - rtyper = self.translator.buildrtyper(type_system="ootype") - rtyper.specialize(dont_simplify_again=True) - @taskdef([RTYPE], "JIT compiler generation") def task_pyjitpl_lltype(self): """ Generate bytecodes for JIT and flow the JIT helper functions @@ -373,20 +364,6 @@ # self.log.info("the JIT compiler was generated") - @taskdef([OOTYPE], "JIT compiler generation") - def task_pyjitpl_ootype(self): - """ Generate bytecodes for JIT and flow the JIT helper functions - ootype version - """ - get_policy = self.extra['jitpolicy'] - self.jitpolicy = get_policy(self) - # - from rpython.jit.metainterp.warmspot import apply_jit - apply_jit(self.translator, policy=self.jitpolicy, - backend_name='cli', inline=True) #XXX - # - self.log.info("the JIT compiler was generated") - @taskdef([RTYPE], "test of the JIT on the llgraph backend") def task_jittest_lltype(self): """ Run with the JIT on top of the llgraph backend @@ -408,14 +385,6 @@ from rpython.translator.backendopt.all import backend_optimizations backend_optimizations(self.translator) - OOBACKENDOPT = 'backendopt_ootype' - @taskdef([OOTYPE], "ootype back-end optimisations") - def task_backendopt_ootype(self): - """ Run all backend optimizations - ootype version - """ - from rpython.translator.backendopt.all import backend_optimizations - backend_optimizations(self.translator) - STACKCHECKINSERTION = 'stackcheckinsertion_lltype' @taskdef(['?'+BACKENDOPT, RTYPE, 'annotate'], "inserting stack checks") @@ -548,88 +517,6 @@ log.llinterpret.event("result -> %s" % v) - @taskdef(["?" 
+ OOBACKENDOPT, OOTYPE], 'Generating JVM source') - def task_source_jvm(self): - from rpython.translator.jvm.genjvm import GenJvm - from rpython.translator.jvm.node import EntryPoint - - entry_point_graph = self.translator.graphs[0] - is_func = not self.standalone - entry_point = EntryPoint(entry_point_graph, is_func, is_func) - self.gen = GenJvm(udir, self.translator, entry_point) - self.jvmsource = self.gen.generate_source() - self.log.info("Wrote JVM code") - - @taskdef(['source_jvm'], 'Compiling JVM source') - def task_compile_jvm(self): - from rpython.translator.oosupport.support import unpatch_os - from rpython.translator.jvm.test.runtest import JvmGeneratedSourceWrapper - self.jvmsource.compile() - self.c_entryp = JvmGeneratedSourceWrapper(self.jvmsource) - # restore original os values - if hasattr(self, 'old_cli_defs'): - unpatch_os(self.old_cli_defs) - self.log.info("Compiled JVM source") - if self.standalone and self.exe_name: - self.copy_jvm_jar() - - def copy_jvm_jar(self): - import subprocess - basename = self.exe_name % self.get_info() - root = udir.join('pypy') - manifest = self.create_manifest(root) - jnajar = py.path.local(__file__).dirpath('jvm', 'src', 'jna.jar') - classlist = self.create_classlist(root, [jnajar]) - jarfile = py.path.local(basename + '.jar') - self.log.info('Creating jar file') - oldpath = root.chdir() - subprocess.call(['jar', 'cmf', str(manifest), str(jarfile), '@'+str(classlist)]) - oldpath.chdir() - - # create a convenience script - newexename = basename - f = file(newexename, 'w') - f.write("""#!/bin/bash -LEDIT=`type -p ledit` -EXE=`readlink $0` -if [ -z $EXE ] -then - EXE=$0 -fi -$LEDIT java -Xmx256m -jar $EXE.jar "$@" -""") - f.close() - os.chmod(newexename, 0755) - - def create_manifest(self, root): - filename = root.join('manifest.txt') - manifest = filename.open('w') - manifest.write('Main-class: pypy.Main\n\n') - manifest.close() - return filename - - def create_classlist(self, root, additional_jars=[]): - import subprocess - # first, uncompress additional jars - for jarfile in additional_jars: - oldpwd = root.chdir() - subprocess.call(['jar', 'xf', str(jarfile)]) - oldpwd.chdir() - filename = root.join('classlist.txt') - classlist = filename.open('w') - classfiles = list(root.visit('*.class', True)) - classfiles += root.visit('*.so', True) - classfiles += root.visit('*.dll', True) - classfiles += root.visit('*.jnilib', True) - for classfile in classfiles: - print >> classlist, classfile.relto(root) - classlist.close() - return filename - - @taskdef(['compile_jvm'], 'XXX') - def task_run_jvm(self): - pass - def proceed(self, goals): if not goals: if self.default_goal: diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,7 +1,6 @@ import py from rpython.translator.driver import TranslationDriver -import optparse def test_ctr(): td = TranslationDriver() @@ -27,16 +26,9 @@ assert td.backend_select_goals(['backendopt_lltype']) == [ 'backendopt_lltype'] - expected = ['annotate', 'backendopt_lltype', - 'backendopt_ootype', - 'llinterpret_lltype', - 'rtype_ootype', 'rtype_lltype', - 'source_cli', 'source_c', - 'compile_cli', 'compile_c', - 'compile_jvm', 'source_jvm', 'run_jvm', - 'pyjitpl_lltype', - 'pyjitpl_ootype'] - assert set(td.exposed) == set(expected) + expected = ['annotate', 'backendopt_lltype', 'llinterpret_lltype', + 'rtype_lltype', 'source_c', 'compile_c', 'pyjitpl_lltype', ] + assert set(td.exposed) == 
set(expected) td = TranslationDriver({'backend': None, 'type_system': 'lltype'}) From noreply at buildbot.pypy.org Tue Jul 9 03:00:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:43 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove ootype translation options Message-ID: <20130709010043.BACFC1C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65278:690e016f571c Date: 2013-07-08 00:56 +0200 http://bitbucket.org/pypy/pypy/changeset/690e016f571c/ Log: Remove ootype translation options diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -33,20 +33,11 @@ default=False, cmdline="--continuation", requires=[("translation.type_system", "lltype")]), ChoiceOption("type_system", "Type system to use when RTyping", - ["lltype", "ootype"], cmdline=None, default="lltype", - requires={ - "ootype": [ - ("translation.backendopt.constfold", False), - ("translation.backendopt.clever_malloc_removal", False), - ("translation.gc", "boehm"), # it's not really used, but some jit code expects a value here - ] - }), + ["lltype"], cmdline=None, default="lltype"), ChoiceOption("backend", "Backend to use for code generation", - ["c", "cli", "jvm"], default="c", + ["c"], default="c", requires={ "c": [("translation.type_system", "lltype")], - "cli": [("translation.type_system", "ootype")], - "jvm": [("translation.type_system", "ootype")], }, cmdline="-b --backend"), @@ -188,11 +179,6 @@ "If true, makes an lldebug build", default=False, cmdline="--lldebug"), - # options for ootype - OptionDescription("ootype", "Object Oriented Typesystem options", [ - BoolOption("mangle", "Mangle names of class members", default=True), - ]), - OptionDescription("backendopt", "Backend Optimization Options", [ # control inlining BoolOption("inline", "Do basic inlining and malloc removal", @@ -270,11 +256,6 @@ ('translation.backendopt.constfold', False)]) ]), - OptionDescription("cli", "GenCLI options", [ - BoolOption("trace_calls", "Trace function calls", default=False, - cmdline="--cli-trace-calls"), - BoolOption("exception_transformer", "Use exception transformer", default=False), - ]), ChoiceOption("platform", "target platform", ['host'] + PLATFORMS, default='host', cmdline='--platform', From noreply at buildbot.pypy.org Tue Jul 9 03:00:45 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 03:00:45 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: kill rpython.jit.metainterp.typesystem.OOTypeHelper Message-ID: <20130709010045.1E4911C0113@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65279:d00592d4942b Date: 2013-07-08 02:26 +0200 http://bitbucket.org/pypy/pypy/changeset/d00592d4942b/ Log: kill rpython.jit.metainterp.typesystem.OOTypeHelper diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -6,7 +6,7 @@ IntLowerBound, MININT, MAXINT from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp -from rpython.jit.metainterp.typesystem import llhelper, oohelper +from rpython.jit.metainterp.typesystem import llhelper from rpython.tool.pairtype import extendabletype from rpython.rlib.debug import debug_print from 
rpython.rlib.objectmodel import specialize @@ -256,7 +256,6 @@ CVAL_ZERO = ConstantValue(CONST_0) CVAL_ZERO_FLOAT = ConstantValue(Const._new(0.0)) llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL) -oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL) REMOVED = AbstractResOp(None) diff --git a/rpython/jit/metainterp/test/test_typesystem.py b/rpython/jit/metainterp/test/test_typesystem.py --- a/rpython/jit/metainterp/test/test_typesystem.py +++ b/rpython/jit/metainterp/test/test_typesystem.py @@ -1,6 +1,5 @@ from rpython.jit.metainterp import typesystem from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype class TypeSystemTests(object): @@ -35,19 +34,3 @@ def null_ref(self): return lltype.nullptr(llmemory.GCREF.TO) - - -class TestOOtype(TypeSystemTests): - helper = typesystem.oohelper - - def fresh_ref(self): - O = ootype.StaticMethod([], ootype.Signed) - o = O._example() - return ootype.cast_to_object(o) - - def duplicate_ref(self, x): - o = x.obj - return ootype.cast_to_object(o) - - def null_ref(self): - return ootype.NULL diff --git a/rpython/jit/metainterp/typesystem.py b/rpython/jit/metainterp/typesystem.py --- a/rpython/jit/metainterp/typesystem.py +++ b/rpython/jit/metainterp/typesystem.py @@ -132,7 +132,7 @@ def cast_to_ref(self, value): return lltype.cast_opaque_ptr(llmemory.GCREF, value) cast_to_ref._annspecialcase_ = 'specialize:ll' - + def getaddr_for_box(self, box): return box.getaddr() @@ -143,104 +143,4 @@ assert ref return lltype.identityhash(ref) -# ____________________________________________________________ - -class OOTypeHelper(TypeSystemHelper): - - name = 'ootype' - functionptr = staticmethod(ootype.static_meth) - nullptr = staticmethod(ootype.null) - cast_instance_to_base_ref = staticmethod(cast_instance_to_base_obj) - BASETYPE = ootype.Object - BoxRef = history.BoxObj - ConstRef = history.ConstObj - loops_done_with_this_frame_ref = None # patched by compile.py - NULLREF = history.ConstObj.value - CONST_NULL = history.ConstObj(NULLREF) - CVAL_NULLREF = None # patched by optimizeopt.py - - def new_ConstRef(self, x): - obj = ootype.cast_to_object(x) - return history.ConstObj(obj) - - def get_typeptr(self, obj): - return ootype.classof(obj) - - def get_FuncType(self, ARGS, RESULT): - FUNCTYPE = ootype.StaticMethod(ARGS, RESULT) - return FUNCTYPE, FUNCTYPE - - def get_superclass(self, TYPE): - return TYPE._superclass - - def cast_to_instance_maybe(self, TYPE, instance): - return instance - cast_to_instance_maybe._annspecialcase_ = 'specialize:arg(1)' - - def cast_fnptr_to_root(self, fnptr): - return ootype.cast_to_object(fnptr) - - def cls_of_box(self, cpu, box): - obj = box.getref(ootype.ROOT) - oocls = ootype.classof(obj) - return history.ConstObj(ootype.cast_to_object(oocls)) - - def subclassOf(self, cpu, clsbox1, clsbox2): - cls1 = clsbox1.getref(ootype.Class) - cls2 = clsbox2.getref(ootype.Class) - return ootype.subclassof(cls1, cls2) - - def get_exception_box(self, etype): - return history.ConstObj(etype) - - def get_exc_value_box(self, evalue): - return history.BoxObj(evalue) - - def get_exception_obj(self, evaluebox): - # only works when translated - obj = evaluebox.getref(ootype.ROOT) - return cast_base_ptr_to_instance(Exception, obj) - - def cast_to_baseclass(self, value): - return ootype.cast_from_object(ootype.ROOT, value) - - @specialize.ll() - def getlength(self, array): - return array.ll_length() - - @specialize.ll() - def getarrayitem(self, array, i): - return array.ll_getitem_fast(i) - - 
@specialize.ll() - def setarrayitem(self, array, i, newvalue): - array.ll_setitem_fast(i, newvalue) - - def conststr(self, str): - oo = oostr(str) - return history.ConstObj(ootype.cast_to_object(oo)) - - # A dict whose keys are refs (like the .value of BoxObj). - # It is a normal dict on ootype. Two copies, to avoid conflicts - # with the value type. - def new_ref_dict(self): - return {} - def new_ref_dict_2(self): - return {} - - def cast_vtable_to_hashable(self, cpu, obj): - return ootype.cast_to_object(obj) - - def cast_from_ref(self, TYPE, value): - return ootype.cast_from_object(TYPE, value) - cast_from_ref._annspecialcase_ = 'specialize:arg(1)' - - def cast_to_ref(self, value): - return ootype.cast_to_object(value) - cast_to_ref._annspecialcase_ = 'specialize:ll' - - def getaddr_for_box(self, box): - return box.getref_base() - llhelper = LLTypeHelper() -oohelper = OOTypeHelper() From noreply at buildbot.pypy.org Tue Jul 9 10:04:53 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 9 Jul 2013 10:04:53 +0200 (CEST) Subject: [pypy-commit] pypy default: This code was indented with 5 space :( Message-ID: <20130709080453.4D7171C2FE8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65280:f4dde596e35c Date: 2013-07-09 18:04 +1000 http://bitbucket.org/pypy/pypy/changeset/f4dde596e35c/ Log: This code was indented with 5 space :( diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -708,7 +708,7 @@ return MapDictIteratorValues(self.space, self, w_dict) def iteritems(self, w_dict): return MapDictIteratorItems(self.space, self, w_dict) - + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() @@ -716,69 +716,69 @@ _become(obj, new_obj) class MapDictIteratorKeys(BaseKeyIterator): - def __init__(self, space, strategy, dictimplementation): - BaseKeyIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseKeyIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_key_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr - return None + def next_key_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr + return None + class MapDictIteratorValues(BaseValueIterator): - def __init__(self, space, strategy, dictimplementation): - BaseValueIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, 
dictimplementation): + BaseValueIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_value_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - return self.w_obj.getdictvalue(self.space, attr) - return None + def next_value_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + return self.w_obj.getdictvalue(self.space, attr) + return None + class MapDictIteratorItems(BaseItemIterator): - def __init__(self, space, strategy, dictimplementation): - BaseItemIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseItemIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_item_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None, None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr, self.w_obj.getdictvalue(self.space, attr) - return None, None + def next_item_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None, None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr, self.w_obj.getdictvalue(self.space, attr) + return None, None + # ____________________________________________________________ # Magic caching @@ -860,7 +860,7 @@ # selector = ("", INVALID) if w_descr is None: - selector = (name, DICT) #common case: no such attr in the class + selector = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, TypeCell): pass # we have a TypeCell in the class: give up elif space.is_data_descr(w_descr): @@ -890,7 +890,6 @@ LOAD_ATTR_slowpath._dont_inline_ = True def LOOKUP_METHOD_mapdict(f, nameindex, w_obj): - space = f.space pycode = f.getcode() entry = pycode._mapdict_caches[nameindex] if entry.is_valid_for_obj(w_obj): From noreply at buildbot.pypy.org Tue Jul 9 10:06:31 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 9 Jul 2013 10:06:31 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: skip stm_barriers; no implementation in runner.py Message-ID: <20130709080631.48F541C2FC5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65281:5d3354d5a836 Date: 
2013-07-08 08:26 +0200 http://bitbucket.org/pypy/pypy/changeset/5d3354d5a836/ Log: skip stm_barriers; no implementation in runner.py diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -958,6 +958,12 @@ def execute_cond_call_gc_wb_array(self, descr, a, b, c): py.test.skip("cond_call_gc_wb_array not supported") + def execute_cond_call_stm_wb(self, descr, a, b): + py.test.skip("cond_call_stm_wb not supported") + + def execute_cond_call_stm_rb(self, descr, a, b): + py.test.skip("cond_call_stm_rb not supported") + def execute_keepalive(self, descr, x): pass diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -9,8 +9,8 @@ # # Any SETFIELD_GC, SETARRAYITEM_GC, SETINTERIORFIELD_GC must be done on a # W object. The operation that forces an object p1 to be W is -# COND_CALL_GC_WB(p1, 0, descr=x2Wdescr), for x in 'PGORL'. This -# COND_CALL_GC_WB is a bit special because if p1 is not W, it *replaces* +# COND_CALL_STM_WB(p1, 0, descr=x2Wdescr), for x in 'PGORL'. This +# COND_CALL_STM_WB is a bit special because if p1 is not W, it *replaces* # its value with the W copy (by changing the register's value and # patching the stack location if any). It's still conceptually the same # object, but the pointer is different. diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -345,9 +345,9 @@ if value in (rop.FORCE_TOKEN, rop.CALL_ASSEMBLER, rop.COND_CALL_GC_WB, + rop.COND_CALL_GC_WB_ARRAY, rop.COND_CALL_STM_WB, rop.COND_CALL_STM_RB, - rop.COND_CALL_GC_WB_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, From noreply at buildbot.pypy.org Tue Jul 9 10:06:32 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 9 Jul 2013 10:06:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: we don't need that extra parameter Message-ID: <20130709080632.996451C2FC5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65282:b29a9e46f214 Date: 2013-07-09 10:04 +0200 http://bitbucket.org/pypy/pypy/changeset/b29a9e46f214/ Log: we don't need that extra parameter diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -958,10 +958,10 @@ def execute_cond_call_gc_wb_array(self, descr, a, b, c): py.test.skip("cond_call_gc_wb_array not supported") - def execute_cond_call_stm_wb(self, descr, a, b): + def execute_cond_call_stm_wb(self, descr, a): py.test.skip("cond_call_stm_wb not supported") - def execute_cond_call_stm_rb(self, descr, a, b): + def execute_cond_call_stm_rb(self, descr, a): py.test.skip("cond_call_stm_rb not supported") def execute_keepalive(self, descr, x): diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -9,7 +9,7 @@ # # Any SETFIELD_GC, SETARRAYITEM_GC, SETINTERIORFIELD_GC must be done on a # W object. The operation that forces an object p1 to be W is -# COND_CALL_STM_WB(p1, 0, descr=x2Wdescr), for x in 'PGORL'. This +# COND_CALL_STM_WB(p1, descr=x2Wdescr), for x in 'PGORL'. 
This # COND_CALL_STM_WB is a bit special because if p1 is not W, it *replaces* # its value with the W copy (by changing the register's value and # patching the stack location if any). It's still conceptually the same @@ -120,7 +120,7 @@ write_barrier_descr = mpcat[target_category] except KeyError: return v_base # no barrier needed - args = [v_base, self.c_zero] + args = [v_base,] if target_category == 'W': op = rop.COND_CALL_STM_WB else: diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -44,7 +44,7 @@ jump() """, """ [p1, p2] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -59,7 +59,7 @@ """, """ [p1, p2] p3 = same_as(ConstPtr(t)) - cond_call_stm_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, descr=P2Wdescr) setfield_gc(p3, p2, descr=tzdescr) jump() """, t=NULL) @@ -87,9 +87,9 @@ jump() """, """ [p1, p2, p3, p4] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) - cond_call_stm_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, descr=P2Wdescr) setfield_gc(p3, p4, descr=tzdescr) jump() """) @@ -102,7 +102,7 @@ jump() """, """ [p1, p2, i3] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) jump() @@ -117,10 +117,10 @@ jump(p1) """, """ [p1, p2, i3] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, i3, descr=tydescr) jump(p1) """) @@ -162,7 +162,7 @@ jump(p2) """, """ [p1] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) """) @@ -177,7 +177,7 @@ """, """ [p1] p3 = same_as(ConstPtr(t)) - cond_call_stm_rb(p3, 0, descr=P2Rdescr) + cond_call_stm_rb(p3, descr=P2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) jump(p2) """, t=NULL) @@ -190,7 +190,7 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) jump(i3) """) @@ -202,7 +202,7 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=adescr) jump(i3) """) @@ -215,7 +215,7 @@ jump(p2, i2) """, """ [p1] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) i2 = getfield_gc(p1, descr=tydescr) jump(p2, i2) @@ -229,9 +229,9 @@ jump(p2, i2) """, """ [p1] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_rb(p2, 0, descr=P2Rdescr) + cond_call_stm_rb(p2, descr=P2Rdescr) i2 = getfield_gc(p2, descr=tydescr) jump(p2, i2) """) @@ -247,10 +247,10 @@ jump(p1) """, """ [p1] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) i1 = getfield_gc(p1, descr=tydescr) i2 = int_add(i1, 1) - cond_call_stm_wb(p1, 0, descr=R2Wdescr) + cond_call_stm_wb(p1, descr=R2Wdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) @@ -263,7 +263,7 @@ jump(p2) """, """ [p1] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, 
descr=P2Wdescr) setfield_gc(p1, 123, descr=tydescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) @@ -295,10 +295,10 @@ jump(p2) """, """ [p1] - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) call(p2) - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, 5, descr=tydescr) jump(p2) """) @@ -358,9 +358,9 @@ jump() """, """ [p1, i1, p2, p3, i3, p4] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setarrayitem_gc(p1, i1, p2, descr=adescr) - cond_call_stm_wb(p3, 0, descr=P2Wdescr) + cond_call_stm_wb(p3, descr=P2Wdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) jump() """) @@ -374,7 +374,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) @@ -390,7 +390,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setinteriorfield_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=adescr) @@ -405,7 +405,7 @@ jump() """, """ [p1, i2, i3] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() @@ -432,11 +432,11 @@ jump(i2, p7) """ % op, """ [i1, i2, i3, p7] - cond_call_stm_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, descr=P2Wdescr) setfield_gc(p7, 10, descr=tydescr) $INEV %s - cond_call_stm_wb(p7, 0, descr=P2Wdescr) + cond_call_stm_wb(p7, descr=P2Wdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -448,8 +448,8 @@ jump() """, """ [p1, p2, i1, i2, i3] - cond_call_stm_wb(p2, 0, descr=P2Wdescr) - cond_call_stm_rb(p1, 0, descr=P2Rdescr) + cond_call_stm_wb(p2, descr=P2Wdescr) + cond_call_stm_rb(p1, descr=P2Rdescr) copystrcontent(p1, p2, i1, i2, i3) jump() """) @@ -468,7 +468,7 @@ jump(p1) """ % op, """ [p1] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) @@ -491,10 +491,10 @@ jump(p1) """ % op, """ [p1] - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s - cond_call_stm_wb(p1, 0, descr=P2Wdescr) + cond_call_stm_wb(p1, descr=P2Wdescr) setfield_gc(p1, 20, descr=tydescr) jump(p1) """ % op, calldescr2=calldescr2) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -500,8 +500,8 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_STM_WB/2d', # [objptr, newvalue] (write barrier) - 'COND_CALL_STM_RB/2d', # [objptr, newvalue] (read barrier) + 'COND_CALL_STM_WB/1d', # objptr (write barrier) + 'COND_CALL_STM_RB/1d', # objptr (read barrier) 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) 
'DEBUG_MERGE_POINT/*', # debugging only From noreply at buildbot.pypy.org Tue Jul 9 11:33:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 11:33:50 +0200 (CEST) Subject: [pypy-commit] stmgc default: change / to - so we can run it on older duhton Message-ID: <20130709093350.AC96A1C2FE8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r375:5076bffd3728 Date: 2013-07-09 11:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/5076bffd3728/ Log: change / to - so we can run it on older duhton diff --git a/duhton/demo/trees.duh b/duhton/demo/trees.duh --- a/duhton/demo/trees.duh +++ b/duhton/demo/trees.duh @@ -1,6 +1,6 @@ (defun create-tree (n) - (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) + (if (== n 0) (cons 1) (cons (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) @@ -9,7 +9,7 @@ ) ) -(setq tree (create-tree 1024)) +(setq tree (create-tree 10)) (print (walk-tree tree)) (setq n 0) (while (< n 1000) diff --git a/duhton/demo/trees2.duh b/duhton/demo/trees2.duh --- a/duhton/demo/trees2.duh +++ b/duhton/demo/trees2.duh @@ -1,6 +1,6 @@ (defun create-tree (n) - (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) + (if (== n 0) (list 1) (list (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) @@ -10,7 +10,7 @@ ) (defun lookup-tree () - (walk-tree (create-tree 1024)) + (walk-tree (create-tree 10)) ) (setq n 0) From noreply at buildbot.pypy.org Tue Jul 9 11:33:51 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 11:33:51 +0200 (CEST) Subject: [pypy-commit] stmgc default: implement cons Message-ID: <20130709093351.C9E421C3000@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r376:df788d5d8fff Date: 2013-07-09 11:16 +0200 http://bitbucket.org/pypy/stmgc/changeset/df788d5d8fff/ Log: implement cons diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -527,6 +527,14 @@ return DuCons_Cdr(obj); } +DuObject *du_cons(DuObject *cons, DuObject *locals) +{ + DuObject *obj1, *obj2; + _du_getargs2("cons", cons, locals, &obj1, &obj2); + + return DuCons_New(obj1, obj2); +} + DuObject *du_not(DuObject *cons, DuObject *locals) { DuObject *obj; @@ -627,6 +635,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "defun", du_defun); DuFrame_SetBuiltinMacro(Du_Globals, "car", du_car); DuFrame_SetBuiltinMacro(Du_Globals, "cdr", du_cdr); + DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); DuFrame_SetBuiltinMacro(Du_Globals, "not", du_not); DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); diff --git a/duhton/test/test_cons.py b/duhton/test/test_cons.py --- a/duhton/test/test_cons.py +++ b/duhton/test/test_cons.py @@ -5,6 +5,7 @@ assert run("(print ())") == "None\n" assert run("(print None)") == "None\n" assert run("(print (quote (1 2 3)))") == "( 1 2 3 )\n" + assert run("(print (cons 1 2))") == "( 1 . 2 )\n" def test_car_cdr(): assert run("(print (car (quote (2 3))))") == "2\n" From noreply at buildbot.pypy.org Tue Jul 9 11:33:53 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 11:33:53 +0200 (CEST) Subject: [pypy-commit] stmgc default: implement pair? Message-ID: <20130709093353.0A9731C300F@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r377:ffe6884d33a4 Date: 2013-07-09 11:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/ffe6884d33a4/ Log: implement pair? 
diff --git a/duhton/demo/trees.duh b/duhton/demo/trees.duh --- a/duhton/demo/trees.duh +++ b/duhton/demo/trees.duh @@ -1,6 +1,6 @@ (defun create-tree (n) - (if (== n 0) (cons 1) (cons (create-tree (- n 1)) (create-tree (- n 1)))) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -588,6 +588,16 @@ return DuInt_FromInt(res != NULL); } +DuObject *du_pair(DuObject *cons, DuObject *locals) +{ + _du_read1(cons); + if (cons == Du_None || _DuCons_NEXT(cons) != Du_None) + Du_FatalError("pair?: expected one argument"); + + DuObject *ob = _DuCons_CAR(cons); + return DuInt_FromInt(DuCons_Check(ob)); +} + DuObject *du_assert(DuObject *cons, DuObject *locals) { DuObject *obj; @@ -640,6 +650,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); + DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", Du_None); } diff --git a/duhton/test/test_cons.py b/duhton/test/test_cons.py --- a/duhton/test/test_cons.py +++ b/duhton/test/test_cons.py @@ -7,6 +7,10 @@ assert run("(print (quote (1 2 3)))") == "( 1 2 3 )\n" assert run("(print (cons 1 2))") == "( 1 . 2 )\n" +def test_pair(): + assert run("(print (pair? 1))") == "0\n" + assert run("(print (pair? (cons 1 2)))") == "1\n" + def test_car_cdr(): assert run("(print (car (quote (2 3))))") == "2\n" assert run("(print (cdr (quote (2 3))))") == "( 3 )\n" From noreply at buildbot.pypy.org Tue Jul 9 11:56:23 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 11:56:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix pair and use it in tree demos Message-ID: <20130709095623.E1E0F1C0EF5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r378:70de751b15e8 Date: 2013-07-09 11:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/70de751b15e8/ Log: fix pair and use it in tree demos diff --git a/duhton/demo/trees.duh b/duhton/demo/trees.duh --- a/duhton/demo/trees.duh +++ b/duhton/demo/trees.duh @@ -4,8 +4,9 @@ ) (defun walk-tree (tree) - (if (== (len tree) 1) (get tree 0) - (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + (if (pair? tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 ) ) diff --git a/duhton/demo/trees2.duh b/duhton/demo/trees2.duh --- a/duhton/demo/trees2.duh +++ b/duhton/demo/trees2.duh @@ -1,11 +1,13 @@ + (defun create-tree (n) - (if (== n 0) (list 1) (list (create-tree (- n 1)) (create-tree (- n 1)))) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) - (if (== (len tree) 1) (get tree 0) - (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + (if (pair? 
tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 ) ) diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -590,12 +590,9 @@ DuObject *du_pair(DuObject *cons, DuObject *locals) { - _du_read1(cons); - if (cons == Du_None || _DuCons_NEXT(cons) != Du_None) - Du_FatalError("pair?: expected one argument"); - - DuObject *ob = _DuCons_CAR(cons); - return DuInt_FromInt(DuCons_Check(ob)); + DuObject *obj; + _du_getargs1("pair?", cons, locals, &obj); + return DuInt_FromInt(DuCons_Check(obj)); } DuObject *du_assert(DuObject *cons, DuObject *locals) From noreply at buildbot.pypy.org Tue Jul 9 13:36:48 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 9 Jul 2013 13:36:48 +0200 (CEST) Subject: [pypy-commit] pypy default: A failing test of taking the union of a type and a PBC Message-ID: <20130709113648.A29711C1260@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65283:a788df771340 Date: 2013-07-09 21:36 +1000 http://bitbucket.org/pypy/pypy/changeset/a788df771340/ Log: A failing test of taking the union of a type and a PBC diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,6 +740,32 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) + def test_union_type_some_opbc(self): + class A(object): + def f(self): + return type(self) + + class B(A): + pass + + def f(tp): + return tp + + def main(n): + if n: + if n == 1: + inst = A() + else: + inst = B() + arg = inst.f() + else: + arg = B + return f(arg).__name__ + + a = self.RPythonAnnotator() + s = a.build_types(main, [int]) + assert isinstance(s, annmodel.SomeString) + def test_ann_assert(self): def assert_(x): assert x,"XXX" From noreply at buildbot.pypy.org Tue Jul 9 13:57:29 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 13:57:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Start writing a blog post Message-ID: <20130709115729.9BC3F1C1260@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4977:3d38052a94e1 Date: 2013-07-09 13:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/3d38052a94e1/ Log: Start writing a blog post diff --git a/blog/draft/duhton.rst b/blog/draft/duhton.rst new file mode 100644 --- /dev/null +++ b/blog/draft/duhton.rst @@ -0,0 +1,34 @@ + +Software Transactional Memory lisp experiments +============================================== + +As covered in `the previous blog post`_, the STM subproject of PyPy has been +back on the drawing board and the result of this experiment is an STM-aware +garbage collector written in C. This is finished by now, thanks to Armin +and Remi_M work, we have a fully functional garbage collector and STM subsystem +that can be used from any C program with enough effort. Using it is more than +a little mundane, since you have to inserts write and read barriers by hand +everywhere in your code that reads or writes to garbage collector controlled +memory. Once we finish PyPy integration, those sort of things would be inserted +automatically by STM transformation in the interpreter. + +However, to experiment some more, we created a `lisp interpreter`_ +(called duhton), that follows closely CPython's implementation strategy +and for anyone familiar with CPython's source code, it should be pretty +readable. 
This interpreter works like a normal and very basic lisp variant, +however it comes with ``(transaction`` builtin, that lets you spawn transactions +using STM system. We implemented a few demos that let you play with the +transaction system. All the demos are running without conflicts, which means +there is no conflicting writes to global memory and hence are amenable to +parallelization very well. They exercise: + +* arithmetics - ``demo/many_sqare_roots.duh`` + +* read-only access to globals - ``demo/trees.duh`` + +* read-write access to local objects - ``demo/trees2.duh`` + +With the latter ones being very similar to the classic gcbench. STM-aware +duhton can be found in `the stmgc repo`_, while the STM-less duhton, +that uses refcounting, can be found in `the duhton repo`_ under the ``base`` +branch. From noreply at buildbot.pypy.org Tue Jul 9 13:57:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 13:57:30 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130709115730.C41DE1C1260@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4978:2f847bf4e26e Date: 2013-07-09 13:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/2f847bf4e26e/ Log: merge diff --git a/blog/draft/pypy-alpha-arm.rst b/blog/draft/pypy-alpha-arm.rst --- a/blog/draft/pypy-alpha-arm.rst +++ b/blog/draft/pypy-alpha-arm.rst @@ -72,71 +72,40 @@ * relative speedup (how much bigger the x86 speedup is over ARM speedup) -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ +(in case this table is not readable, please visit http://morepypy.blogspot.com/2013/05/pypy-20-alpha-for-arm.html) + | Benchmark | PyPy vs CPython (arm) | PyPy vs CPython (x86) | x86 vs arm (pypy) | x86 vs arm (cpython) | relative speedup | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | ai | 3.61 | 3.16 | 7.70 | 8.82 | 0.87 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | bm_mako | 3.41 | 2.11 | 8.56 | 13.82 | 0.62 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | chaos | 21.82 | 17.80 | 6.93 | 8.50 | 0.82 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | crypto_pyaes | 22.53 | 19.48 | 6.53 | 7.56 | 0.86 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | django | 13.43 | 11.16 | 7.90 | 9.51 | 0.83 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | eparse | 1.43 | 1.17 | 6.61 | 8.12 | 0.81 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | fannkuch | 6.22 | 5.36 | 6.18 | 7.16 | 0.86 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | float | 5.22 | 6.00 | 9.68 | 8.43 | 1.15 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | go | 4.72 | 3.34 | 5.91 | 8.37 | 0.71 | 
-+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | hexiom2 | 8.70 | 7.00 | 7.69 | 9.56 | 0.80 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | html5lib | 2.35 | 2.13 | 6.59 | 7.26 | 0.91 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | json_bench | 1.12 | 0.93 | 7.19 | 8.68 | 0.83 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | meteor-contest | 2.13 | 1.68 | 5.95 | 7.54 | 0.79 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | nbody_modified | 8.19 | 7.78 | 6.08 | 6.40 | 0.95 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | pidigits | 1.27 | 0.95 | 14.67 | 19.66 | 0.75 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | pyflate-fast | 3.30 | 3.57 | 10.64 | 9.84 | 1.08 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | raytrace-simple | 46.41 | 29.00 | 5.14 | 8.23 | 0.62 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | richards | 31.48 | 28.51 | 6.95 | 7.68 | 0.91 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | slowspitfire | 1.28 | 1.14 | 5.91 | 6.61 | 0.89 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | spambayes | 1.93 | 1.27 | 4.15 | 6.30 | 0.66 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | sphinx | 1.01 | 1.05 | 7.76 | 7.45 | 1.04 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | spitfire | 1.55 | 1.58 | 5.62 | 5.49 | 1.02 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | spitfire_cstringio | 9.61 | 5.74 | 5.43 | 9.09 | 0.60 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | sympy_expand | 1.42 | 0.97 | 3.86 | 5.66 | 0.68 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | sympy_integrate | 1.60 | 0.95 | 4.24 | 7.12 | 0.60 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | sympy_str | 0.72 | 0.48 | 3.68 | 5.56 | 0.66 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | sympy_sum | 1.99 | 1.19 | 3.83 | 6.38 | 0.60 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | telco | 14.28 | 9.36 | 3.94 | 6.02 | 0.66 | 
-+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | twisted_iteration | 11.60 | 7.33 | 6.04 | 9.55 | 0.63 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | twisted_names | 3.68 | 2.83 | 5.01 | 6.50 | 0.77 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ | twisted_pb | 4.94 | 3.02 | 5.10 | 8.34 | 0.61 | -+--------------------+-----------------------+-----------------------+-------------------+----------------------+------------------+ It seems that Cortex A9, while significantly slower than Xeon, has higher slowdowns with a large interpreter (CPython) than a JIT compiler (PyPy). This From noreply at buildbot.pypy.org Tue Jul 9 13:57:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 13:57:32 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130709115732.12A211C1260@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4979:e8246309398b Date: 2013-07-09 13:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/e8246309398b/ Log: merge diff --git a/blog/draft/py3k-status-update-11.rst b/blog/draft/py3k-status-update-11.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-11.rst @@ -0,0 +1,63 @@ +Py3k status update #11 +---------------------- + +This is the 11th status update about our work on the `py3k branch`_, which we +can work on thanks to all of the people who donated_ to the `py3k proposal`_. + +Here's some highlights of the progress made since the previous update: + +* PyPy py3k now matches CPython 3's hash code for + int/float/complex/Decimal/Fraction + +* Various outstanding unicode identifier related issues were + resolved. E.g. test_importlib/pep263/ucn/unicode all now fully pass. Various + usage of identifiers (in particular type and module names) have been fixed to + handle non-ascii names -- mostly around display of reprs and exception + messages. + +* The unicodedata database has been upgraded to 6.0.0. + +* Windows support has greatly improved, though it could still use some more + help (but so does the default branch to a certain degree). + +* Probably the last of the parsing related bugs/features have been taken care + of. + +* Of course various other smaller miscellaneous fixes + +This leaves the branch w/ only about 5 outstanding failures of the stdlib test +suite: + +* test_float + + 1 failing test about containment of floats in collections. + +* test_memoryview + + Various failures: requires some bytes/str changes among other things (Manuel + Jacob's has some progress on this on the `py3k-memoryview branch`_) + +* test_multiprocessing + + 1 or more tests deadlock on some platforms + +* test_sys and test_threading + + 2 failing tests for the New GIL's new API + +Probably the biggest feature left to tackle is the New GIL. + +We're now pretty close to pushing an initial release. We had planned for one +around PyCon, but having missed that we've put some more effort into the branch +to provide a more fully-fledged initial release. + +Thanks to the following for their contributions: Manuel Jacob, Amaury Forgeot +d'Arc, Karl Ramm, Jason Chu and Christian Hudon. + +cheers, +Phil + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. 
_`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/commits/all/tip/branch%28%22py3k%22%29 +.. _`py3k-memoryview branch`: https://bitbucket.org/pypy/pypy/compare/py3k-memoryview..py3k diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -77,6 +77,11 @@ - calling string equality does not automatically promote the argument to a constant. +- i0 = int_add_ovf(9223372036854775807, 1) + guard_overflow() + +- p0 = call_pure(ConstClass(something), ConstPtr(2)) + guard_exception(SomeException) PYTHON EXAMPLES --------------- diff --git a/talk/pycon2013/pypy_without_gil/message_passing.py b/talk/pycon2013/pypy_without_gil/message_passing.py --- a/talk/pycon2013/pypy_without_gil/message_passing.py +++ b/talk/pycon2013/pypy_without_gil/message_passing.py @@ -24,5 +24,4 @@ running_threads -= 1 else: num += 1 - print num diff --git a/talk/pycon2013/pypy_without_gil/transactions2.py b/talk/pycon2013/pypy_without_gil/transactions2.py --- a/talk/pycon2013/pypy_without_gil/transactions2.py +++ b/talk/pycon2013/pypy_without_gil/transactions2.py @@ -8,11 +8,12 @@ def do_stuff_for_all(lst): + #for x in lst: + # do_stuff(x) + for x in lst: - do_stuff(x) - #for x in lst: - # transaction.add(do_stuff, x) - #transaction.run() + transaction.add(do_stuff, x) + transaction.run() do_stuff_for_all(range(20)) From noreply at buildbot.pypy.org Tue Jul 9 14:00:37 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 14:00:37 +0200 (CEST) Subject: [pypy-commit] pypy default: 2.1 beta announcement Message-ID: <20130709120037.1A5311C1260@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65284:9d2af55df726 Date: 2013-07-09 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/9d2af55df726/ Log: 2.1 beta announcement diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcomming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.0 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture. + +* Various numpy improvements. + +* Bugfixes to cffi and ctypes. + +* Bugfixes to the stacklet support + +* Improved logging performance + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. 
``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team From noreply at buildbot.pypy.org Tue Jul 9 14:06:55 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 9 Jul 2013 14:06:55 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test Message-ID: <20130709120655.C0BCB1C1260@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65285:4f6d8c39e8cc Date: 2013-07-09 22:04 +1000 http://bitbucket.org/pypy/pypy/changeset/4f6d8c39e8cc/ Log: fix test diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -742,11 +742,13 @@ def test_union_type_some_opbc(self): class A(object): + name = "A" + def f(self): return type(self) class B(A): - pass + name = "B" def f(tp): return tp @@ -760,7 +762,7 @@ arg = inst.f() else: arg = B - return f(arg).__name__ + return f(arg).name a = self.RPythonAnnotator() s = a.build_types(main, [int]) From noreply at buildbot.pypy.org Tue Jul 9 14:06:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 9 Jul 2013 14:06:56 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130709120656.DD2EA1C1260@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65286:74dbbebd4e5a Date: 2013-07-09 22:06 +1000 http://bitbucket.org/pypy/pypy/changeset/74dbbebd4e5a/ Log: merged upstream diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcomming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.0 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture. + +* Various numpy improvements. + +* Bugfixes to cffi and ctypes. + +* Bugfixes to the stacklet support + +* Improved logging performance + +What is PyPy? 
+============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team From noreply at buildbot.pypy.org Tue Jul 9 14:11:00 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 14:11:00 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20130709121100.529F21C00B9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65287:ed1a1f167370 Date: 2013-07-09 14:10 +0200 http://bitbucket.org/pypy/pypy/changeset/ed1a1f167370/ Log: typo diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -8,7 +8,7 @@ not longer considered alpha level. We would like to thank the `Raspberry Pi Foundation`_ for supporting the work to finish PyPy's ARM support. -You can download the PyPy 2.0 beta 1 release here: +You can download the PyPy 2.1 beta 1 release here: http://pypy.org/download.html From noreply at buildbot.pypy.org Tue Jul 9 14:13:00 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 14:13:00 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20130709121300.CD2601C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65288:5195aa291e6d Date: 2013-07-09 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/5195aa291e6d/ Log: typo diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -2,7 +2,7 @@ PyPy 2.1 beta 1 =============== -We're pleased to announce the first beta of the upcomming 2.1 release of PyPy. +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. This beta contains many bugfixes and improvements, numerous improvements to the numpy in pypy effort. The main feature being that the ARM processor support is not longer considered alpha level. 
We would like to thank the `Raspberry Pi From noreply at buildbot.pypy.org Tue Jul 9 14:31:59 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 9 Jul 2013 14:31:59 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: minor corrections Message-ID: <20130709123200.0055E1C0EF5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r4980:53412cc62d31 Date: 2013-07-09 14:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/53412cc62d31/ Log: minor corrections diff --git a/blog/draft/duhton.rst b/blog/draft/duhton.rst --- a/blog/draft/duhton.rst +++ b/blog/draft/duhton.rst @@ -3,24 +3,24 @@ ============================================== As covered in `the previous blog post`_, the STM subproject of PyPy has been -back on the drawing board and the result of this experiment is an STM-aware -garbage collector written in C. This is finished by now, thanks to Armin -and Remi_M work, we have a fully functional garbage collector and STM subsystem +back on the drawing board. The result of this experiment is an STM-aware +garbage collector written in C. This is finished by now, thanks to Armin's +and Remi's work, we have a fully functional garbage collector and a STM system that can be used from any C program with enough effort. Using it is more than a little mundane, since you have to inserts write and read barriers by hand everywhere in your code that reads or writes to garbage collector controlled -memory. Once we finish PyPy integration, those sort of things would be inserted -automatically by STM transformation in the interpreter. +memory. Once we finish PyPy integration, this manual work is done automatically +by the STM transformation in the interpreter. However, to experiment some more, we created a `lisp interpreter`_ -(called duhton), that follows closely CPython's implementation strategy -and for anyone familiar with CPython's source code, it should be pretty +(called Duhton), that follows closely CPython's implementation strategy. +For anyone familiar with CPython's source code, it should be pretty readable. This interpreter works like a normal and very basic lisp variant, -however it comes with ``(transaction`` builtin, that lets you spawn transactions -using STM system. We implemented a few demos that let you play with the +however it comes with a ``transaction`` builtin, that lets you spawn transactions +using the STM system. We implemented a few demos that let you play with the transaction system. All the demos are running without conflicts, which means -there is no conflicting writes to global memory and hence are amenable to -parallelization very well. They exercise: +there are no conflicting writes to global memory and hence the demos are very +amenable to parallelization. They exercise: * arithmetics - ``demo/many_sqare_roots.duh`` @@ -29,6 +29,6 @@ * read-write access to local objects - ``demo/trees2.duh`` With the latter ones being very similar to the classic gcbench. STM-aware -duhton can be found in `the stmgc repo`_, while the STM-less duhton, +Duhton can be found in `the stmgc repo`_, while the STM-less Duhton, that uses refcounting, can be found in `the duhton repo`_ under the ``base`` branch. 
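What the blog draft above calls inserting "write and read barriers by hand" looks roughly like the following C sketch against the c4 stmgc API, where stm_read_barrier() and stm_write_barrier() return the pointer that must actually be used for the access. The node struct (with its embedded stm_object_s header) and both helper functions are made up purely for illustration; they are not code from the repository.

    /* hypothetical user code, not taken from the stmgc repo */
    #include "stmgc.h"

    struct node {
        struct stm_object_s hdr;   /* GC-managed objects start with the stm header */
        long value;
        gcptr next;
    };

    long read_value(gcptr p)
    {
        p = stm_read_barrier(p);             /* required before reading any field of p */
        return ((struct node *)p)->value;
    }

    void set_value(gcptr p, long v)
    {
        p = stm_write_barrier(p);            /* required before writing; may return a private copy */
        ((struct node *)p)->value = v;
    }

Duhton keeps its interpreter sources readable by hiding this pattern behind small macros such as the _du_read1() calls visible in the glob.c diffs above, which wrap the read barrier around a local variable.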
From noreply at buildbot.pypy.org Tue Jul 9 15:06:22 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 15:06:22 +0200 (CEST) Subject: [pypy-commit] pypy default: it seems this is enough to enable fastpath for ['self', 'space'] unwrap_spec Message-ID: <20130709130622.D62591C0EF5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65289:8194ed86bd54 Date: 2013-07-09 15:04 +0200 http://bitbucket.org/pypy/pypy/changeset/8194ed86bd54/ Log: it seems this is enough to enable fastpath for ['self', 'space'] unwrap_spec diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") From noreply at buildbot.pypy.org Tue Jul 9 15:06:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 15:06:24 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130709130624.268971C0EF5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65290:8557e3918b25 Date: 2013-07-09 15:05 +0200 http://bitbucket.org/pypy/pypy/changeset/8557e3918b25/ Log: merge diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -2,7 +2,7 @@ PyPy 2.1 beta 1 =============== -We're pleased to announce the first beta of the upcomming 2.1 release of PyPy. +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. This beta contains many bugfixes and improvements, numerous improvements to the numpy in pypy effort. The main feature being that the ARM processor support is not longer considered alpha level. 
We would like to thank the `Raspberry Pi From noreply at buildbot.pypy.org Tue Jul 9 15:27:28 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 15:27:28 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: kill tags Message-ID: <20130709132728.670DE1C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65291:347c6b018473 Date: 2013-07-09 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/347c6b018473/ Log: kill tags diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,6 +3,3 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 -9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm -9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm -ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm From noreply at buildbot.pypy.org Tue Jul 9 15:27:29 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 15:27:29 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Added tag pypy-2.1-beta for changeset 347c6b018473 Message-ID: <20130709132729.94F471C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65292:1b792e2b2266 Date: 2013-07-09 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/1b792e2b2266/ Log: Added tag pypy-2.1-beta for changeset 347c6b018473 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,4 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +347c6b01847308411f19c06f16ebe8945f55aa84 pypy-2.1-beta From noreply at buildbot.pypy.org Tue Jul 9 15:41:16 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 15:41:16 +0200 (CEST) Subject: [pypy-commit] pypy default: add rpython to the list of directories covered by the license Message-ID: <20130709134116.C87B01C0EF5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65293:efcf18bcc46e Date: 2013-07-09 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/efcf18bcc46e/ Log: add rpython to the list of directories covered by the license diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License From noreply at buildbot.pypy.org Tue Jul 9 15:41:18 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 15:41:18 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: add rpython to the list of directories covered by the license Message-ID: <20130709134118.9FBB61C0EF5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65294:a0e2bc9ceccd Date: 2013-07-09 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/a0e2bc9ceccd/ Log: add rpython to the list of directories covered by the license diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all 
software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License From noreply at buildbot.pypy.org Tue Jul 9 15:41:19 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 9 Jul 2013 15:41:19 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Added tag pypy-2.1-beta for changeset a0e2bc9ceccd Message-ID: <20130709134119.BEFC31C0EF5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65295:efa3a7b96422 Date: 2013-07-09 15:40 +0200 http://bitbucket.org/pypy/pypy/changeset/efa3a7b96422/ Log: Added tag pypy-2.1-beta for changeset a0e2bc9ceccd diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -4,3 +4,5 @@ ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 347c6b01847308411f19c06f16ebe8945f55aa84 pypy-2.1-beta +347c6b01847308411f19c06f16ebe8945f55aa84 pypy-2.1-beta +a0e2bc9ceccdd7e734d4c881a051320441ea5200 pypy-2.1-beta From noreply at buildbot.pypy.org Tue Jul 9 18:24:25 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 9 Jul 2013 18:24:25 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: seems to be working mostly Message-ID: <20130709162425.644BE1C3364@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r379:5a4649117329 Date: 2013-07-09 18:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/5a4649117329/ Log: seems to be working mostly diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -945,6 +945,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + dprintf(("acquire_locks\n")); assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); @@ -957,6 +958,7 @@ revision_t v; retry: assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); v = ACCESS_ONCE(R->h_revision); if (IS_POINTER(v)) /* "has a more recent revision" */ { @@ -989,7 +991,7 @@ static void CancelLocks(struct tx_descriptor *d) { wlog_t *item; - + dprintf(("cancel_locks\n")); if (!g2l_any_entry(&d->public_to_private)) return; @@ -1257,7 +1259,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - + dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1341,6 +1343,7 @@ d->active = 2; d->reads_size_limit_nonatomic = 0; update_reads_size_limit(d); + dprintf(("make_inevitable(%p)\n", d)); } static revision_t acquire_inev_mutex_and_mark_global_cur_time( diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -370,8 +370,65 @@ for (; pobj != pend; pobj++) { obj = *pobj; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - assert(IS_POINTER(obj->h_revision)); - visit((gcptr *)&obj->h_revision); + //assert(IS_POINTER(obj->h_revision)); + + gcptr next = (gcptr)obj->h_revision; + /* XXX: do better. 
visit obj first and then + copy over if possible: */ + if (!(obj->h_revision & 1) + && (next->h_revision & 1) + && !(next->h_tid & GCFLAG_VISITED) + && (next->h_tid & GCFLAG_OLD) + && !(next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) /* XXX */ + && !(obj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) { + + assert(next->h_original == (revision_t)obj); + assert(next->h_tid & GCFLAG_PUBLIC); + assert(!(next->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(next->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(next->h_tid & GCFLAG_BACKUP_COPY)); + + + revision_t pre_hash = obj->h_original; + revision_t old_tid = obj->h_tid; + memcpy(obj, next, stmgc_size(next)); + assert(!((obj->h_tid ^ old_tid) + & (GCFLAG_BACKUP_COPY | GCFLAG_STUB + | GCFLAG_PUBLIC | GCFLAG_HAS_ID + | GCFLAG_PRIVATE_FROM_PROTECTED))); + obj->h_original = pre_hash; + obj->h_tid = old_tid; + + fprintf(stdout, "copy %p over prebuilt %p\n", next, obj); + + /* will not be freed anyway and visit() only traces + head revision if not visited already */ + obj->h_tid &= ~GCFLAG_VISITED; + /* For those visiting later: + XXX: don't: they will think that they are outdated*/ + next->h_revision = (revision_t)obj; + //if (next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { + // may have already lost it + /* mark somehow so that we can update pub_to_priv + for inevitable transactions and others ignore + it during tracing. Otherwise, inev transactions + will think 'next' is outdated. */ + next->h_tid &= ~GCFLAG_OLD; + //} + } + else if (IS_POINTER(obj->h_revision)) { + visit((gcptr *)&obj->h_revision); + } + + // prebuilt originals will always be traced + // in visit_keep. And otherwise, they may + // not lose their pub_to_priv flag + // I think because transactions abort + // without clearing the flags. + obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; + gcptrlist_insert(&objects_to_trace, obj); } } @@ -410,24 +467,45 @@ /* the current transaction's private copies of public objects */ wlog_t *item; + if (1 || d->active == 2) { + /* inevitable transactions need to have their pub_to_priv + fixed. Otherwise, they'll think their objects got outdated */ + /* XXX: others too, but maybe not worth it */ + struct G2L new_public_to_private; + memset(&new_public_to_private, 0, sizeof(struct G2L)); + + fprintf(stdout, "start fixup (%p):\n", d); + G2L_LOOP_FORWARD(d->public_to_private, item) { + gcptr R = item->addr; + gcptr L = item->val; + if (!(R->h_tid & GCFLAG_OLD)) { + /* R was copied over its original */ + gcptr new_R = (gcptr)R->h_original; + g2l_insert(&new_public_to_private, new_R, L); + G2L_LOOP_DELETE(item); + + if (L->h_revision == (revision_t)R) { + L->h_revision = (revision_t)new_R; + fprintf(stdout," fixup %p to %p <-> %p\n", R, new_R, L); + } + else + fprintf(stdout," fixup %p to %p -> %p\n", R, new_R, L); + } + } G2L_LOOP_END; + + /* copy to real pub_to_priv */ + G2L_LOOP_FORWARD(new_public_to_private, item) { + g2l_insert(&d->public_to_private, item->addr, item->val); + } G2L_LOOP_END; + g2l_delete_not_used_any_more(&new_public_to_private); + } + G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; - /* Objects that were not visited yet must have the PUB_TO_PRIV - flag. 
Except if that transaction will abort anyway, then it - may be removed from a previous major collection that didn't - fix the PUB_TO_PRIV because the transaction was going to - abort anyway: - 1. minor_collect before major collect (R->L, R is outdated, abort) - 2. major collect removes flag - 3. major collect again, same thread, no time to abort - 4. flag still removed - */ - assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, - R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); visit_keep(R); if (L != NULL) { /* minor collection found R->L in public_to_young @@ -478,7 +556,12 @@ gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - if (!(obj->h_tid & GCFLAG_VISITED)) { + if (!(obj->h_tid & GCFLAG_OLD)) { + obj->h_tid |= GCFLAG_OLD; + items[i] = (gcptr)obj->h_revision; + assert(0); + } + else if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ items[i] = items[--d->private_from_protected.size]; } @@ -500,6 +583,13 @@ gcptr obj = items[i]; assert(!(obj->h_tid & GCFLAG_STUB)); + if (!(obj->h_tid & GCFLAG_OLD)) { + obj->h_tid |= GCFLAG_OLD; + obj = (gcptr)obj->h_revision; + items[i] = obj; + } + + /* Warning: in case the object listed is outdated and has been replaced with a more recent revision, then it might be the case that obj->h_revision doesn't have GCFLAG_VISITED, but @@ -510,6 +600,12 @@ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; } + + if (!(obj->h_tid & GCFLAG_OLD)) { + obj->h_tid |= GCFLAG_OLD; + obj = (gcptr)obj->h_revision; + items[i] = obj; + } revision_t v = obj->h_revision; if (IS_POINTER(v)) { @@ -551,6 +647,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); + assert(item->addr->h_tid & GCFLAG_OLD); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -364,6 +364,10 @@ { long i, limit = d->num_read_objects_known_old; gcptr *items = d->list_of_read_objects.items; + + if (d->active < 0) + return; // aborts anyway + assert(d->list_of_read_objects.size >= limit); if (d->active == 2) { @@ -509,8 +513,9 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); - assert(gcptrlist_size(&d->list_of_read_objects) >= - d->num_read_objects_known_old); + assert(IMPLIES(d->active > 0, + gcptrlist_size(&d->list_of_read_objects) >= + d->num_read_objects_known_old)); assert(gcptrlist_size(&d->private_from_protected) >= d->num_private_from_protected_known_old); d->num_read_objects_known_old = From noreply at buildbot.pypy.org Tue Jul 9 18:48:32 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 9 Jul 2013 18:48:32 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: kill ConstObj, BoxObj Message-ID: <20130709164832.5B4961C3367@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65296:1a1aec304f2e Date: 2013-07-09 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/1a1aec304f2e/ Log: kill ConstObj, BoxObj diff --git a/rpython/jit/backend/arm/test/test_calling_convention.py b/rpython/jit/backend/arm/test/test_calling_convention.py --- a/rpython/jit/backend/arm/test/test_calling_convention.py +++ b/rpython/jit/backend/arm/test/test_calling_convention.py @@ -18,8 +18,7 @@ BoxInt, Box, BoxPtr, JitCellToken, TargetToken, ConstInt, ConstPtr, - BoxObj, - ConstObj, BoxFloat, ConstFloat) + BoxFloat, ConstFloat) 
skip_unless_run_slow_tests() @@ -64,7 +63,7 @@ assert self.cpu.get_int_value(deadframe, x) == x assert self.cpu.get_int_value(deadframe, 11) == 38 - + def test_float_hf_call_mixed(self): if not self.cpu.supports_floats: py.test.skip("requires floats") diff --git a/rpython/jit/backend/arm/test/test_generated.py b/rpython/jit/backend/arm/test/test_generated.py --- a/rpython/jit/backend/arm/test/test_generated.py +++ b/rpython/jit/backend/arm/test/test_generated.py @@ -4,8 +4,7 @@ BasicFailDescr, BoxInt, Box, BoxPtr, ConstInt, ConstPtr, - BoxObj, Const, - ConstObj, BoxFloat, ConstFloat) + Const, BoxFloat, ConstFloat) from rpython.jit.metainterp.history import JitCellToken from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rtyper.test.test_llinterp import interpret diff --git a/rpython/jit/backend/llgraph/test/test_llgraph.py b/rpython/jit/backend/llgraph/test/test_llgraph.py --- a/rpython/jit/backend/llgraph/test/test_llgraph.py +++ b/rpython/jit/backend/llgraph/test/test_llgraph.py @@ -7,7 +7,7 @@ class TestLLTypeLLGraph(LLtypeBackendTest): # for individual tests see: # ====> ../../test/runner_test.py - + def get_cpu(self): return LLGraphCPU(None) @@ -30,18 +30,3 @@ assert llmemory.cast_adr_to_ptr(a2, lltype.Ptr(X)) == x assert heaptracker.adr2int(llmemory.NULL) == 0 assert heaptracker.int2adr(0) == llmemory.NULL - -## these tests never worked -## class TestOOTypeLLGraph(LLGraphTest): -## from rpython.jit.backend.llgraph.runner import OOtypeCPU as cpu_type - -def test_fielddescr_ootype(): - py.test.skip("ootype tests skipped") - from rpython.rtyper.ootypesystem import ootype - from rpython.jit.backend.llgraph.runner import OOtypeCPU - A = ootype.Instance("A", ootype.ROOT, {"foo": ootype.Signed}) - B = ootype.Instance("B", A) - cpu = OOtypeCPU(None) - descr1 = cpu.fielddescrof(A, "foo") - descr2 = cpu.fielddescrof(B, "foo") - assert descr1 is descr2 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -6,8 +6,7 @@ BoxInt, Box, BoxPtr, JitCellToken, TargetToken, ConstInt, ConstPtr, - BoxObj, - ConstObj, BoxFloat, ConstFloat) + BoxFloat, ConstFloat) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.typesystem import deref from rpython.jit.codewriter.effectinfo import EffectInfo @@ -55,7 +54,7 @@ for box in inputargs: if isinstance(box, BoxInt): args.append(box.getint()) - elif isinstance(box, (BoxPtr, BoxObj)): + elif isinstance(box, BoxPtr): args.append(box.getref_base()) elif isinstance(box, BoxFloat): args.append(box.getfloatstorage()) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp.typesystem import deref, fieldType, arrayItem from rpython.rtyper.lltypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.translator.backendopt.graphanalyze import BoolGraphAnalyzer @@ -223,8 +222,6 @@ def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: return False - if isinstance(TYPE, ootype.OOType): - return True if not isinstance(TYPE, lltype.GcStruct): # can be a non-GC-struct return False if fieldname == "typeptr" and TYPE is OBJECT: @@ -237,8 +234,6 @@ def consider_array(ARRAY): if arrayItem(ARRAY) is 
lltype.Void: return False - if isinstance(ARRAY, ootype.Array): - return True if not isinstance(ARRAY, lltype.GcArray): # can be a non-GC-array return False return True diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype from rpython.jit.codewriter.effectinfo import effectinfo_from_writeanalyze,\ EffectInfo @@ -91,17 +90,3 @@ assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays - -def test_filter_out_ooarray_of_void(): - effects = frozenset([("array", ootype.Array(ootype.Void))]) - effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays - -def test_filter_out_instance_with_void(): - effects = frozenset([("struct", ootype.Instance("x", ootype.ROOT, {"a": ootype.Void}), "a")]) - effectinfo = effectinfo_from_writeanalyze(effects, None) - assert not effectinfo.readonly_descrs_fields - assert not effectinfo.write_descrs_fields - assert not effectinfo.write_descrs_arrays diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -1,6 +1,5 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.ootypesystem import ootype from rpython.rlib.objectmodel import we_are_translated, Symbolic from rpython.rlib.objectmodel import compute_unique_id from rpython.rlib.rarithmetic import r_int64, is_valid_int @@ -48,8 +47,6 @@ return "int" else: return "ref" - elif isinstance(TYPE, ootype.OOType): - return "ref" else: raise NotImplementedError("type %s not supported" % TYPE) getkind._annspecialcase_ = 'specialize:memo' @@ -67,14 +64,7 @@ def repr_object(box): try: TYPE = box.value.obj._TYPE - if TYPE is ootype.String: - return '(%r)' % box.value.obj._str - if TYPE is ootype.Class or isinstance(TYPE, ootype.StaticMethod): - return '(%r)' % box.value.obj - if isinstance(box.value.obj, ootype._view): - return repr(box.value.obj._inst._TYPE) - else: - return repr(TYPE) + return repr(TYPE) except AttributeError: return box.value @@ -359,56 +349,6 @@ CONST_NULL = ConstPtr(ConstPtr.value) -class ConstObj(Const): - type = REF - value = ootype.NULL - _attrs_ = ('value',) - - def __init__(self, value): - assert ootype.typeOf(value) is ootype.Object - self.value = value - - def clonebox(self): - return BoxObj(self.value) - - nonconstbox = clonebox - - def getref_base(self): - return self.value - - def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) - getref._annspecialcase_ = 'specialize:arg(1)' - - def _get_hash_(self): - if self.value: - return ootype.identityhash(self.value) - else: - return 0 - -## def getaddr(self): -## # so far this is used only when calling -## # CodeWriter.IndirectCallset.bytecode_for_address. 
We don't need a -## # real addr, but just a key for the dictionary -## return self.value - - def same_constant(self, other): - if isinstance(other, ConstObj): - return self.value == other.value - return False - - def nonnull(self): - return bool(self.value) - - _getrepr_ = repr_object - - def repr_rpython(self): - return repr_rpython(self, 'co') - - def _get_str(self): # for debugging only - from rpython.rtyper.annlowlevel import hlstr - return hlstr(ootype.cast_from_object(ootype.String, self.value)) - class Box(AbstractValue): __slots__ = () _extended_display = True @@ -423,7 +363,6 @@ intval = lltype.cast_primitive(lltype.Signed, x) return BoxInt(intval) elif kind == "ref": - # XXX add ootype support? ptrval = lltype.cast_opaque_ptr(llmemory.GCREF, x) return BoxPtr(ptrval) elif kind == "float": @@ -575,46 +514,6 @@ NULLBOX = BoxPtr() - -class BoxObj(Box): - type = REF - _attrs_ = ('value',) - - def __init__(self, value=ootype.NULL): - assert ootype.typeOf(value) is ootype.Object - self.value = value - - def forget_value(self): - self.value = ootype.NULL - - def clonebox(self): - return BoxObj(self.value) - - def constbox(self): - return ConstObj(self.value) - - def getref_base(self): - return self.value - - def getref(self, OBJ): - return ootype.cast_from_object(OBJ, self.getref_base()) - getref._annspecialcase_ = 'specialize:arg(1)' - - def _get_hash_(self): - if self.value: - return ootype.identityhash(self.value) - else: - return 0 - - def nonnull(self): - return bool(self.value) - - def repr_rpython(self): - return repr_rpython(self, 'bo') - - _getrepr_ = repr_object - - # ____________________________________________________________ @@ -728,7 +627,7 @@ def repr_of_descr(self): return 'TargetToken(%d)' % compute_unique_id(self) - + class TreeLoop(object): inputargs = None operations = None @@ -825,7 +724,7 @@ seen = dict.fromkeys(inputargs) assert len(seen) == len(inputargs), ( "duplicate Box in the LABEL arguments") - + assert operations[-1].is_final() if operations[-1].getopnum() == rop.JUMP: target = operations[-1].getdescr() @@ -834,7 +733,7 @@ def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -950,7 +849,7 @@ def add_jitcell_token(self, token): assert isinstance(token, JitCellToken) self.jitcell_token_wrefs.append(weakref.ref(token)) - + def set_history(self, history): self.operations = history.operations @@ -1043,7 +942,7 @@ opname = op.getopname() insns[opname] = insns.get(opname, 0) + 1 return self._check_insns(insns, expected, check) - + def check_loops(self, expected=None, everywhere=False, **check): insns = {} for loop in self.get_all_loops(): @@ -1066,7 +965,7 @@ print import pdb; pdb.set_trace() return - + for insn, expected_count in check.items(): getattr(rop, insn.upper()) # fails if 'rop.INSN' does not exist found = insns.get(insn, 0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -7,8 +7,7 @@ from rpython.jit.backend.llgraph import runner from rpython.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr, - Const, TreeLoop, BoxObj, - ConstObj, AbstractDescr, + Const, TreeLoop, AbstractDescr, JitCellToken, TargetToken) from 
rpython.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from rpython.jit.metainterp.optimize import InvalidLoop @@ -275,79 +274,9 @@ namespace = locals() -class OOtypeMixin_xxx_disabled(object): - type_system = 'ootype' - -## def get_class_of_box(self, box): -## root = box.getref(ootype.ROOT) -## return ootype.classof(root) - -## cpu = runner.OOtypeCPU(None) -## NODE = ootype.Instance('NODE', ootype.ROOT, {}) -## NODE._add_fields({'value': ootype.Signed, -## 'floatval' : ootype.Float, -## 'next': NODE}) -## NODE2 = ootype.Instance('NODE2', NODE, {'other': NODE}) - -## node_vtable = ootype.runtimeClass(NODE) -## node_vtable_adr = ootype.cast_to_object(node_vtable) -## node_vtable2 = ootype.runtimeClass(NODE2) -## node_vtable_adr2 = ootype.cast_to_object(node_vtable2) - -## node = ootype.new(NODE) -## nodebox = BoxObj(ootype.cast_to_object(node)) -## myptr = nodebox.value -## myptr2 = ootype.cast_to_object(ootype.new(NODE)) -## nodebox2 = BoxObj(ootype.cast_to_object(node)) -## valuedescr = cpu.fielddescrof(NODE, 'value') -## floatdescr = cpu.fielddescrof(NODE, 'floatval') -## nextdescr = cpu.fielddescrof(NODE, 'next') -## otherdescr = cpu.fielddescrof(NODE2, 'other') -## nodesize = cpu.typedescrof(NODE) -## nodesize2 = cpu.typedescrof(NODE2) - -## arraydescr = cpu.arraydescrof(ootype.Array(ootype.Signed)) -## floatarraydescr = cpu.arraydescrof(ootype.Array(ootype.Float)) - -## # a plain Record -## S = ootype.Record({'a': ootype.Signed, 'b': NODE}) -## ssize = cpu.typedescrof(S) -## adescr = cpu.fielddescrof(S, 'a') -## bdescr = cpu.fielddescrof(S, 'b') -## sbox = BoxObj(ootype.cast_to_object(ootype.new(S))) -## arraydescr2 = cpu.arraydescrof(ootype.Array(S)) - -## T = ootype.Record({'c': ootype.Signed, -## 'd': ootype.Array(NODE)}) -## tsize = cpu.typedescrof(T) -## cdescr = cpu.fielddescrof(T, 'c') -## ddescr = cpu.fielddescrof(T, 'd') -## arraydescr3 = cpu.arraydescrof(ootype.Array(NODE)) - -## U = ootype.Instance('U', ootype.ROOT, {'one': ootype.Array(NODE)}) -## usize = cpu.typedescrof(U) -## onedescr = cpu.fielddescrof(U, 'one') -## u_vtable = ootype.runtimeClass(U) -## u_vtable_adr = ootype.cast_to_object(u_vtable) - -## # force a consistent order -## valuedescr.sort_key() -## nextdescr.sort_key() -## adescr.sort_key() -## bdescr.sort_key() - -## FUNC = lltype.FuncType([lltype.Signed], lltype.Signed) -## nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT) # XXX fix ootype - -## cpu.class_sizes = {node_vtable_adr: cpu.typedescrof(NODE), -## node_vtable_adr2: cpu.typedescrof(NODE2), -## u_vtable_adr: cpu.typedescrof(U)} -## namespace = locals() - # ____________________________________________________________ - class Fake(object): failargs_limit = 1000 storedebug = None @@ -447,7 +376,7 @@ preamble.inputargs = inputargs preamble.resume_at_jump_descr = FakeDescrWithSnapshot() - token = JitCellToken() + token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ [ResOperation(rop.LABEL, jump_args, None, descr=token)] @@ -460,7 +389,7 @@ loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], - None, descr=token)] + None, descr=token)] #[inliner.inline_op(jumpop)] assert loop.operations[-1].getopnum() == rop.JUMP assert loop.operations[0].getopnum() == rop.LABEL @@ -479,7 +408,7 @@ preamble.operations.insert(-1, op) return preamble - + class 
FakeDescr(compile.ResumeGuardDescr): def clone_if_mutable(self): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -83,12 +83,6 @@ adr = llmemory.cast_ptr_to_adr(value) value = heaptracker.adr2int(adr) # fall through to the end of the function - elif isinstance(lltype.typeOf(value), ootype.OOType): - value = ootype.cast_to_object(value) - if in_const_box: - return history.ConstObj(value) - else: - return history.BoxObj(value) elif (isinstance(value, float) or longlong.is_longlong(lltype.typeOf(value))): if isinstance(value, float): diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -81,18 +81,11 @@ if self._consts is None: return name obj = self._consts[name] - if self.type_system == 'lltype': - if typ == 'ptr': - return self.model.ConstPtr(obj) - else: - assert typ == 'class' - return self.model.ConstInt(self.model.ptr_to_int(obj)) + if typ == 'ptr': + return self.model.ConstPtr(obj) else: - if typ == 'ptr': - return self.model.ConstObj(obj) - else: - assert typ == 'class' - return self.model.ConstObj(ootype.cast_to_object(obj)) + assert typ == 'class' + return self.model.ConstInt(self.model.ptr_to_int(obj)) def get_descr(self, poss_descr, allow_invent): if poss_descr.startswith('<'): @@ -180,10 +173,7 @@ elif arg == 'None': return None elif arg == 'NULL': - if self.type_system == 'lltype': - return self.model.ConstPtr(self.model.ConstPtr.value) - else: - return self.model.ConstObj(self.model.ConstObj.value) + return self.model.ConstPtr(self.model.ConstPtr.value) elif arg.startswith('ConstPtr('): name = arg[len('ConstPtr('):-1] return self.get_const(name, 'ptr') diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -5,7 +5,7 @@ class LoopModel(object): from rpython.jit.metainterp.history import TreeLoop, JitCellToken from rpython.jit.metainterp.history import Box, BoxInt, BoxFloat - from rpython.jit.metainterp.history import ConstInt, ConstObj, ConstPtr, ConstFloat + from rpython.jit.metainterp.history import ConstInt, ConstPtr, ConstFloat from rpython.jit.metainterp.history import BasicFailDescr, BasicFinalDescr, TargetToken from rpython.jit.metainterp.typesystem import llhelper From noreply at buildbot.pypy.org Tue Jul 9 19:23:07 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 9 Jul 2013 19:23:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130709172307.4979A1C3368@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65297:f23adcde38f1 Date: 2013-07-08 15:50 -0700 http://bitbucket.org/pypy/pypy/changeset/f23adcde38f1/ Log: merge default diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize 
``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,7 +36,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_ffi", "_continuation", "_csv", "_cffi_backend", - "_posixsubprocess", # "cppyy", "micronumpy", + "_posixsubprocess", "_pypyjson", # "cppyy", "micronumpy", ] )) diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. 
the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.1.rst @@ -0,0 +1,78 @@ +====================== +What's new in PyPy 2.1 +====================== + +.. 
this is a revision shortly after release-2.0 +.. startrev: a13c07067613 + +.. branch: ndarray-ptp +put and array.put + +.. branch: numpy-pickle +Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support + +.. branch: remove-tuple-smm +Remove multi-methods on tuple + +.. branch: remove-iter-smm +Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects + +.. branch: argsort-segfault +Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) + +.. branch: dtype-isnative +.. branch: ndarray-round + +.. branch: faster-str-of-bigint +Improve performance of str(long). + +.. branch: ndarray-view +Add view to ndarray and zeroD arrays, not on dtype scalars yet + +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + +.. branch: identity-set +Faster sets for objects + +.. branch: inline-identityhash +Inline the fast path of id() and hash() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) @@ -65,6 +68,9 @@ .. branch: ndarray-view Add view to ndarray and zeroD arrays, not on dtype scalars yet +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + .. 
branch: identity-set Faster sets for objects diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. + + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. 
+ """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for 
the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) + diff = rffi.cast(rffi.LONG, self.end_ptr[0]) - rffi.cast(rffi.LONG, start) + self.pos = i + diff + return self.space.wrap(floatval) + + def decode_int_slow(self, i): + start = i + if self.ll_chars[i] == '-': + i += 1 + while self.ll_chars[i].isdigit(): + i += 1 + s = self.getslice(start, i) + self.pos = i + return self.space.call_function(self.space.w_int, self.space.wrap(s)) + + def parse_integer(self, i): + "Parse a decimal number with an optional minus sign" + sign = 1 + # parse the sign + if self.ll_chars[i] == '-': + sign = -1 + i += 1 + elif self.ll_chars[i] == '+': + i += 1 + # + if self.ll_chars[i] == '0': + i += 1 + return i, False, 0 + + intval = 0 + start = i + while True: + ch = self.ll_chars[i] + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + i += 1 + else: + break + count = i - start + if count == 0: + self._raise("Expected digit at char %d", i) + # if the number has more digits than OVF_DIGITS, it might have + # overflowed + ovf_maybe = (count >= OVF_DIGITS) + return i, ovf_maybe, sign * intval + parse_integer._always_inline_ = True + + def decode_array(self, i): + w_list = self.space.newlist([]) + start = i + count = 0 + i = self.skip_whitespace(start) + if self.ll_chars[i] == ']': + self.pos = i+1 + return w_list + # + while True: + w_item = self.decode_any(i) + i = self.pos + self.space.call_method(w_list, 'append', w_item) + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + i += 1 + if ch == ']': + self.pos = i + return w_list + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.pos) + + def decode_object(self, i): + start = i + w_dict = self.space.newdict() + # + i = self.skip_whitespace(i) + if self.ll_chars[i] == '}': + self.pos = i+1 + return w_dict + # + while True: + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any(i) + if self.last_type != TYPE_STRING: + self._raise("Key name must be string for object starting at char %d", start) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + if ch != ':': + self._raise("No ':' found at char %d", i) + i += 1 + i = self.skip_whitespace(i) + # + w_value = self.decode_any(i) + self.space.setitem(w_dict, w_name, w_value) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + i += 1 + if ch == '}': + self.pos = i + return w_dict + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.pos) + + + def decode_string(self, i): + start = i + bits = 0 + while True: + # this loop is a fast path for strings which do not contain escape + # characters + ch = self.ll_chars[i] + i += 1 + bits |= ord(ch) + if ch == '"': + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + else: + # 
ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + content_so_far = self.getslice(start, i-1) + self.pos = i-1 + return self.decode_string_escaped(start, content_so_far) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + + + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + else: + builder.append_multiple_char(ch, 1) # we should implement append_char + + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 + put = builder.append_multiple_char + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) + elif ch == 'u': + return self.decode_escape_sequence_unicode(i, builder) + else: + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i + + def decode_escape_sequence_unicode(self, i, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
+ start = i + i += 4 + hexdigits = self.getslice(start, i) + try: + val = int(hexdigits, 16) + if val & 0xfc00 == 0xd800: + # surrogate pair + val = self.decode_surrogate_pair(i, val) + i += 6 + except ValueError: + self._raise("Invalid \uXXXX escape (char %d)", i-1) + return # help the annotator to know that we'll never go beyond + # this point + # + uchr = unichr(val) + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + return i + + def decode_surrogate_pair(self, i, highsurr): + if self.ll_chars[i] != '\\' or self.ll_chars[i+1] != 'u': + self._raise("Unpaired high surrogate at char %d", i) + i += 2 + hexdigits = self.getslice(i, i+4) + lowsurr = int(hexdigits, 16) # the possible ValueError is caugth by the caller + return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) + +def loads(space, w_s): + if space.isinstance_w(w_s, space.w_unicode): + raise OperationError(space.w_TypeError, + space.wrap("Expected utf8-encoded str, got unicode")) + s = space.str_w(w_s) + decoder = JSONDecoder(space, s) + try: + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if i < len(s): + start = i + end = len(s) - 1 + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res + finally: + decoder.close() diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): + return W_Unicode(x) + + def wrapint(self, x): + return W_Int(x) + + def wrapfloat(self, x): + return W_Float(x) + + def wrap(self, x): + if isinstance(x, int): + return W_Int(x) + elif isinstance(x, float): + return W_Float(x) 
+ ## elif isinstance(x, str): + ## assert False + else: + return W_Unicode(unicode(x)) + wrap._annspecialcase_ = "specialize:argtype(1)" + + +fakespace = FakeSpace() + +def myloads(msg): + return loads(fakespace, W_String(msg)) + + +def bench(title, N, fn, arg): + a = time.clock() + for i in range(N): + res = fn(arg) + b = time.clock() + print title, (b-a) / N * 1000 + +def entry_point(argv): + if len(argv) != 3: + print 'Usage: %s FILE n' % argv[0] + return 1 + filename = argv[1] + N = int(argv[2]) + f = open_file_as_stream(filename) + msg = f.readall() + + try: + bench('loads ', N, myloads, msg) + except OperationError, e: + print 'Error', e._compute_value(fakespace) + + return 0 + +# _____ Define and setup target ___ + +def target(*args): + return entry_point, None + +if __name__ == '__main__': + entry_point(sys.argv) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -0,0 +1,188 @@ +# -*- encoding: utf-8 -*- +import py +from pypy.module._pypyjson.interp_decoder import JSONDecoder + +def test_skip_whitespace(): + s = ' hello ' + dec = JSONDecoder('fake space', s) + assert dec.pos == 0 + assert dec.skip_whitespace(0) == 3 + assert dec.skip_whitespace(3) == 3 + assert dec.skip_whitespace(8) == len(s) + dec.close() + + + +class AppTest(object): + spaceconfig = {"objspace.usemodules._pypyjson": True} + + def test_raise_on_unicode(self): + import _pypyjson + raises(TypeError, _pypyjson.loads, u"42") + + + def test_decode_constants(self): + import _pypyjson + assert _pypyjson.loads('null') is None + raises(ValueError, _pypyjson.loads, 'nul') + raises(ValueError, _pypyjson.loads, 'nu') + raises(ValueError, _pypyjson.loads, 'n') + raises(ValueError, _pypyjson.loads, 'nuXX') + # + assert _pypyjson.loads('true') is True + raises(ValueError, _pypyjson.loads, 'tru') + raises(ValueError, _pypyjson.loads, 'tr') + raises(ValueError, _pypyjson.loads, 't') + raises(ValueError, _pypyjson.loads, 'trXX') + # + assert _pypyjson.loads('false') is False + raises(ValueError, _pypyjson.loads, 'fals') + raises(ValueError, _pypyjson.loads, 'fal') + raises(ValueError, _pypyjson.loads, 'fa') + raises(ValueError, _pypyjson.loads, 'f') + raises(ValueError, _pypyjson.loads, 'falXX') + + + def test_decode_string(self): + import _pypyjson + res = _pypyjson.loads('"hello"') + assert res == u'hello' + assert type(res) is unicode + + def test_decode_string_utf8(self): + import _pypyjson + s = u'àèìòù' + res = _pypyjson.loads('"%s"' % s.encode('utf-8')) + assert res == s + + def test_skip_whitespace(self): + import _pypyjson + s = ' "hello" ' + assert _pypyjson.loads(s) == u'hello' + s = ' "hello" extra' + raises(ValueError, "_pypyjson.loads(s)") + + def test_unterminated_string(self): + import _pypyjson + s = '"hello' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence(self): + import _pypyjson + assert _pypyjson.loads(r'"\\"') == u'\\' + assert _pypyjson.loads(r'"\""') == u'"' + assert _pypyjson.loads(r'"\/"') == u'/' + assert _pypyjson.loads(r'"\b"') == u'\b' + assert _pypyjson.loads(r'"\f"') == u'\f' + assert _pypyjson.loads(r'"\n"') == u'\n' + assert _pypyjson.loads(r'"\r"') == u'\r' + assert _pypyjson.loads(r'"\t"') == u'\t' + + def test_escape_sequence_in_the_middle(self): + import _pypyjson + s = r'"hello\nworld"' + assert _pypyjson.loads(s) == "hello\nworld" + + def test_unterminated_string_after_escape_sequence(self): + import 
_pypyjson + s = r'"hello\nworld' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence_unicode(self): + import _pypyjson + s = r'"\u1234"' + assert _pypyjson.loads(s) == u'\u1234' + + def test_invalid_utf_8(self): + import _pypyjson + s = '"\xe0"' # this is an invalid UTF8 sequence inside a string + raises(UnicodeDecodeError, "_pypyjson.loads(s)") + + def test_decode_numeric(self): + import sys + import _pypyjson + def check(s, val): + res = _pypyjson.loads(s) + assert type(res) is type(val) + assert res == val + # + check('42', 42) + check('-42', -42) + check('42.123', 42.123) + check('42E0', 42.0) + check('42E3', 42000.0) + check('42E-1', 4.2) + check('42E+1', 420.0) + check('42.123E3', 42123.0) + check('0', 0) + check('-0', 0) + check('0.123', 0.123) + check('0E3', 0.0) + check('5E0001', 50.0) + check(str(1 << 32), 1 << 32) + check(str(1 << 64), 1 << 64) + # + x = str(sys.maxint+1) + '.123' + check(x, float(x)) + x = str(sys.maxint+1) + 'E1' + check(x, float(x)) + x = str(sys.maxint+1) + 'E-1' + check(x, float(x)) + # + check('1E400', float('inf')) + ## # these are non-standard but supported by CPython json + check('Infinity', float('inf')) + check('-Infinity', float('-inf')) + + def test_nan(self): + import math + import _pypyjson + res = _pypyjson.loads('NaN') + assert math.isnan(res) + + def test_decode_numeric_invalid(self): + import _pypyjson + def error(s): + raises(ValueError, _pypyjson.loads, s) + # + error(' 42 abc') + error('.123') + error('+123') + error('12.') + error('12.-3') + error('12E') + error('12E-') + error('0123') # numbers can't start with 0 + + def test_decode_object(self): + import _pypyjson + assert _pypyjson.loads('{}') == {} + assert _pypyjson.loads('{ }') == {} + # + s = '{"hello": "world", "aaa": "bbb"}' + assert _pypyjson.loads(s) == {'hello': 'world', + 'aaa': 'bbb'} + raises(ValueError, _pypyjson.loads, '{"key"') + raises(ValueError, _pypyjson.loads, '{"key": 42') + + def test_decode_object_nonstring_key(self): + import _pypyjson + raises(ValueError, "_pypyjson.loads('{42: 43}')") + + def test_decode_array(self): + import _pypyjson + assert _pypyjson.loads('[]') == [] + assert _pypyjson.loads('[ ]') == [] + assert _pypyjson.loads('[1]') == [1] + assert _pypyjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_pypyjson.loads('[1: 2]')") + raises(ValueError, "_pypyjson.loads('[1, 2')") + raises(ValueError, """_pypyjson.loads('["extra comma",]')""") + + def test_unicode_surrogate_pair(self): + import _pypyjson + expected = u'z\U0001d120x' + res = _pypyjson.loads('"z\\ud834\\udd20x"') + assert res == expected + + diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -65,7 +65,7 @@ [ 3., 4., -1.], [-1., -1., -1.]]) - + NOTE: support for not passing x and y is unsupported """ if space.is_none(w_y): @@ -122,10 +122,10 @@ for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("record type mismatch")) elif dtype.is_record_type() or a_dt.is_record_type(): - raise OperationError(space.w_TypeError, + raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- 
a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy.support import product from rpython.rlib import jit # structures to describe slicing @@ -225,7 +226,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = False + self._done = self.shapelen == 0 or product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size @@ -284,7 +285,7 @@ self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] self.first_line = True self.indices = [0] * len(shape) - self._done = False + self._done = array.get_size() == 0 self.offset = array.start self.dim = dim self.array = array diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -293,6 +293,14 @@ b = array(a, copy=False, ndmin=4) b[0,0,0,0] = 0 assert a[0, 0] == 0 + a = array([[[]]]) + # Simulate tiling an empty array, really tests repeat, reshape + # b = tile(a, (3, 2, 5)) + reps = (3, 4, 5) + c = array(a, copy=False, subok=True, ndmin=len(reps)) + d = c.reshape(3, 4, 0) + e = d.repeat(3, 0) + assert e.shape == (9, 4, 0) def test_type(self): from numpypy import array @@ -2562,6 +2570,9 @@ a = array(range(100) + range(100) + range(100)) b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + a = array([[[]]]).reshape(3,4,0) + b = a.argsort() + assert b.size == 0 def test_argsort_random(self): from numpypy import array diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,18 +131,19 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: for op in chunk.operations: @@ -162,15 +163,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -223,5 +223,5 @@ log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - ops = loop.ops_by_id('getitem') + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) assert log.opnames(ops) == [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, ConstPtr(ptr70), descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -76,18 +76,22 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing - ## from pypy.tool import gdb_pypy - ## reload(gdb_pypy) - ## gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache - ## self.__class__ = gdb_pypy.RPyType + from pypy.tool import gdb_pypy + reload(gdb_pypy) + gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache + self.__class__ = gdb_pypy.RPyType print self.do_invoke(arg, from_tty) def do_invoke(self, arg, from_tty): - obj = self.gdb.parse_and_eval(arg) - hdr = lookup(obj, '_gcheader') - tid = hdr['h_tid'] - offset = tid & 0xFFFFFFFF # 64bit only - offset = int(offset) # convert from gdb.Value to python int + try: + offset = int(arg) + except ValueError: + obj = self.gdb.parse_and_eval(arg) + hdr = lookup(obj, '_gcheader') + tid = hdr['h_tid'] + offset = tid & 0xFFFFFFFF # 64bit only + offset = int(offset) # convert from gdb.Value to python int + typeids = self.get_typeids() if offset in typeids: return typeids[offset] diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,7 +16,5 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_collect_directory(path, parent): - if not cpu.startswith('arm'): - py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) -pytest_collect_file = pytest_collect_directory +def pytest_ignore_collect(path, config): + return not cpu.startswith('arm') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -119,7 +119,7 @@ # detect version of current compiler returncode, stdout, stderr = _run_subprocess(self.cc, '', env=self.c_environ) - r = re.match(r'Microsoft.+C/C\+\+.+\s([0-9]+)\.([0-9]+).*', stderr) + r = re.search(r'Microsoft.+C/C\+\+.+\s([0-9]+)\.([0-9]+).*', stderr) if r is not None: self.version = int(''.join(r.groups())) / 10 - 60 else: From noreply at buildbot.pypy.org Tue Jul 9 19:23:08 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 9 Jul 2013 19:23:08 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20130709172308.7E3721C3368@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: 
r65298:0035155e3f55 Date: 2013-07-08 15:51 -0700 http://bitbucket.org/pypy/pypy/changeset/0035155e3f55/ Log: 2to3 diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -18,7 +18,7 @@ def test_raise_on_unicode(self): import _pypyjson - raises(TypeError, _pypyjson.loads, u"42") + raises(TypeError, _pypyjson.loads, "42") def test_decode_constants(self): @@ -46,19 +46,19 @@ def test_decode_string(self): import _pypyjson res = _pypyjson.loads('"hello"') - assert res == u'hello' - assert type(res) is unicode + assert res == 'hello' + assert type(res) is str def test_decode_string_utf8(self): import _pypyjson - s = u'àèìòù' + s = 'àèìòù' res = _pypyjson.loads('"%s"' % s.encode('utf-8')) assert res == s def test_skip_whitespace(self): import _pypyjson s = ' "hello" ' - assert _pypyjson.loads(s) == u'hello' + assert _pypyjson.loads(s) == 'hello' s = ' "hello" extra' raises(ValueError, "_pypyjson.loads(s)") @@ -69,14 +69,14 @@ def test_escape_sequence(self): import _pypyjson - assert _pypyjson.loads(r'"\\"') == u'\\' - assert _pypyjson.loads(r'"\""') == u'"' - assert _pypyjson.loads(r'"\/"') == u'/' - assert _pypyjson.loads(r'"\b"') == u'\b' - assert _pypyjson.loads(r'"\f"') == u'\f' - assert _pypyjson.loads(r'"\n"') == u'\n' - assert _pypyjson.loads(r'"\r"') == u'\r' - assert _pypyjson.loads(r'"\t"') == u'\t' + assert _pypyjson.loads(r'"\\"') == '\\' + assert _pypyjson.loads(r'"\""') == '"' + assert _pypyjson.loads(r'"\/"') == '/' + assert _pypyjson.loads(r'"\b"') == '\b' + assert _pypyjson.loads(r'"\f"') == '\f' + assert _pypyjson.loads(r'"\n"') == '\n' + assert _pypyjson.loads(r'"\r"') == '\r' + assert _pypyjson.loads(r'"\t"') == '\t' def test_escape_sequence_in_the_middle(self): import _pypyjson @@ -91,7 +91,7 @@ def test_escape_sequence_unicode(self): import _pypyjson s = r'"\u1234"' - assert _pypyjson.loads(s) == u'\u1234' + assert _pypyjson.loads(s) == '\u1234' def test_invalid_utf_8(self): import _pypyjson @@ -122,11 +122,11 @@ check(str(1 << 32), 1 << 32) check(str(1 << 64), 1 << 64) # - x = str(sys.maxint+1) + '.123' + x = str(sys.maxsize+1) + '.123' check(x, float(x)) - x = str(sys.maxint+1) + 'E1' + x = str(sys.maxsize+1) + 'E1' check(x, float(x)) - x = str(sys.maxint+1) + 'E-1' + x = str(sys.maxsize+1) + 'E-1' check(x, float(x)) # check('1E400', float('inf')) @@ -181,7 +181,7 @@ def test_unicode_surrogate_pair(self): import _pypyjson - expected = u'z\U0001d120x' + expected = 'z\U0001d120x' res = _pypyjson.loads('"z\\ud834\\udd20x"') assert res == expected From noreply at buildbot.pypy.org Tue Jul 9 19:29:02 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 19:29:02 +0200 (CEST) Subject: [pypy-commit] pypy default: write a test Message-ID: <20130709172902.E99F21C336A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65299:e8feb47a0eb9 Date: 2013-07-09 19:28 +0200 http://bitbucket.org/pypy/pypy/changeset/e8feb47a0eb9/ Log: write a test diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + 
gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space From noreply at buildbot.pypy.org Tue Jul 9 19:29:04 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 19:29:04 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130709172904.4E8281C336A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65300:592758e231f8 Date: 2013-07-09 19:28 +0200 http://bitbucket.org/pypy/pypy/changeset/592758e231f8/ Log: merge diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License From noreply at buildbot.pypy.org Tue Jul 9 20:05:05 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 9 Jul 2013 20:05:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to enable fast path for more functions (test so far) Message-ID: <20130709180505.15B081C345C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65301:aa29cd273b2d Date: 2013-07-09 20:04 +0200 http://bitbucket.org/pypy/pypy/changeset/aa29cd273b2d/ Log: Try to enable fast path for more functions (test so far) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) From noreply at buildbot.pypy.org Tue Jul 9 20:21:40 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 9 Jul 2013 20:21:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: freeing the module and function names after sending them to i.a. 
dlsym Message-ID: <20130709182140.162321C3366@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r491:2e1f503bdb08 Date: 2013-07-05 23:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2e1f503bdb08/ Log: freeing the module and function names after sending them to i.a. dlsym diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -1091,7 +1091,9 @@ try: module = dlopen(c_name) except DLOpenError, e: + rffi.free_charp(c_name) raise error.PrimitiveFailedError + try: try: _getModuleName = dlsym(module, "getModuleName") @@ -1127,7 +1129,8 @@ except error.PrimitiveFailedError: dlclose(module) raise - + finally: + rffi.free_charp(c_name) IProxy = _InterpreterProxy() From noreply at buildbot.pypy.org Tue Jul 9 20:21:41 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 9 Jul 2013 20:21:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: refactored the kernel test collection to be faster on interpreted RSqueak VMs Message-ID: <20130709182141.3AA771C3366@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r492:248c652808be Date: 2013-07-08 13:40 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/248c652808be/ Log: refactored the kernel test collection to be faster on interpreted RSqueak VMs diff --git a/SPy-Benchmarks.package/Integer.extension/instance/runTest..st b/SPy-Benchmarks.package/Integer.extension/instance/runTest..st new file mode 100644 --- /dev/null +++ b/SPy-Benchmarks.package/Integer.extension/instance/runTest..st @@ -0,0 +1,3 @@ +*SPy-Benchmarks +runTest: aString + ^SPyRunner runTest: aString asSymbol \ No newline at end of file diff --git a/SPy-Benchmarks.package/Integer.extension/methodProperties.json b/SPy-Benchmarks.package/Integer.extension/methodProperties.json --- a/SPy-Benchmarks.package/Integer.extension/methodProperties.json +++ b/SPy-Benchmarks.package/Integer.extension/methodProperties.json @@ -3,4 +3,5 @@ }, "instance" : { "runSPyBenchmarks" : "lw 4/29/2013 13:20", + "runTest:" : "lw 6/17/2013 13:28", "runTests" : "lw 5/30/2013 18:02" } } diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/initialize.st b/SPy-Benchmarks.package/SPyRunner.class/class/initialize.st new file mode 100644 --- /dev/null +++ b/SPy-Benchmarks.package/SPyRunner.class/class/initialize.st @@ -0,0 +1,5 @@ +benchmarks +initialize + "self initialize" + super initialize. + KernelTests := self kernelTests. \ No newline at end of file diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/kernelTests.st b/SPy-Benchmarks.package/SPyRunner.class/class/kernelTests.st new file mode 100644 --- /dev/null +++ b/SPy-Benchmarks.package/SPyRunner.class/class/kernelTests.st @@ -0,0 +1,7 @@ +benchmarks +kernelTests + | suite | + suite := TestSuite named: 'RSqueakVM-Tests'. + "To add later: MethodPragmaTest . WeakMessageSendTest" + {IntegerTest . InstructionClientTest . FractionTest . DelayTest . CompiledMethodTest . BehaviorTest . StopwatchTest . YearTest . TimeTest . AllocationTest . ProcessTest . ClassDescriptionTest . SmallIntegerTest . MethodContextTest . CompiledMethodComparisonTest . YearMonthWeekTest . TimespanTest . DependentsArrayTest . CategorizerTest . IntegerDigitLogicTest . SemaphoreTest . PromiseTest . DateTest . DateAndTimeEpochTest . InstVarRefLocatorTest . DateAndTimeTest . BasicBehaviorClassMetaclassTest . ExtendedNumberParserTest . TrueTest . UndefinedObjectTest . ComplexTest . ScheduleTest . CompiledMethodTrailerTest . 
LargePositiveIntegerTest . ScaledDecimalTest . ClassBuilderTest . SqNumberParserTest . ProtoObjectTest . NumberParsingTest . RandomTest . DateAndTimeLeapTest . TimespanDoTest . ClassTest . TimespanDoSpanAYearTest . BlockContextTest . TimeStampTest . GradientFillStyleTest . MethodPropertiesTest . WeekTest . ObjectTest . DurationTest . NumberTest . MonthTest . FalseTest . InstructionPrinterTest . MonitorTest . BooleanTest . BlockClosureTest . FloatTest . ProcessSpecificTest . LargeNegativeIntegerTest} do: [ :each | each addToSuiteFromSelectors: suite]. + ^ suite \ No newline at end of file diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/nonDestroyingTests.st b/SPy-Benchmarks.package/SPyRunner.class/class/nonDestroyingTests.st new file mode 100644 --- /dev/null +++ b/SPy-Benchmarks.package/SPyRunner.class/class/nonDestroyingTests.st @@ -0,0 +1,11 @@ +benchmarks +nonDestroyingTests + | suite | + suite := KernelTests copy. + suite + tests: (suite tests + reject: [ :eachTestCase | + "Those tests lead to VM-Assertion Errors, etc." + #(testBenchFib testAllNamedFromTo testWaitTimeoutMSecs) + includes: eachTestCase selector]). + ^ suite \ No newline at end of file diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/runKernelTests.st b/SPy-Benchmarks.package/SPyRunner.class/class/runKernelTests.st --- a/SPy-Benchmarks.package/SPyRunner.class/class/runKernelTests.st +++ b/SPy-Benchmarks.package/SPyRunner.class/class/runKernelTests.st @@ -1,15 +1,4 @@ benchmarks runKernelTests "self runTests" - | result suite | - suite := TestSuite named: 'RSqueakVM-Tests'. - "To add later: MethodPragmaTest . WeakMessageSendTest" - {IntegerTest . InstructionClientTest . FractionTest . DelayTest . CompiledMethodTest . BehaviorTest . StopwatchTest . YearTest . TimeTest . AllocationTest . ProcessTest . ClassDescriptionTest . SmallIntegerTest . MethodContextTest . CompiledMethodComparisonTest . YearMonthWeekTest . TimespanTest . DependentsArrayTest . CategorizerTest . IntegerDigitLogicTest . SemaphoreTest . PromiseTest . DateTest . DateAndTimeEpochTest . InstVarRefLocatorTest . DateAndTimeTest . BasicBehaviorClassMetaclassTest . ExtendedNumberParserTest . TrueTest . UndefinedObjectTest . ComplexTest . ScheduleTest . CompiledMethodTrailerTest . LargePositiveIntegerTest . ScaledDecimalTest . ClassBuilderTest . SqNumberParserTest . ProtoObjectTest . NumberParsingTest . RandomTest . DateAndTimeLeapTest . TimespanDoTest . ClassTest . TimespanDoSpanAYearTest . BlockContextTest . TimeStampTest . GradientFillStyleTest . MethodPropertiesTest . WeekTest . ObjectTest . DurationTest . NumberTest . MonthTest . FalseTest . InstructionPrinterTest . MonitorTest . BooleanTest . BlockClosureTest . FloatTest . ProcessSpecificTest . LargeNegativeIntegerTest} do: [ :each | each addToSuiteFromSelectors: suite]. - suite - tests: (suite tests - reject: [ :eachTestCase | - "Those tests lead to VM-Assertion Errors, etc." - #(testBenchFib testMultiProcessWaitOnSameDelay testBehaviornewnewShouldNotCrash testAllNamedFromTo testChange testAtomicSuspend testWaitTimeoutMSecs testMonitorNotGainingUnwantedSignalsDuringUnwinding testDegreeCosForExceptionalValues testDegreeSinForExceptionalValues testInfinity1 testInfinity2) - includes: eachTestCase selector]). - result := suite run. 
- ^result asString \ No newline at end of file + ^ self nonDestroyingTests run asString \ No newline at end of file diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/runShootout.st b/SPy-Benchmarks.package/SPyRunner.class/class/runShootout.st --- a/SPy-Benchmarks.package/SPyRunner.class/class/runShootout.st +++ b/SPy-Benchmarks.package/SPyRunner.class/class/runShootout.st @@ -2,12 +2,10 @@ runShootout "self runShootout explore" | stream times | - stream := (ByteString new: 10000) writeStream. + stream := ShootoutTests stdout. times := Dictionary new. - { [ShootoutTests nbody: 200000 "20000000" to: stream]. - [ShootoutTests binarytrees: 17 to: stream]. - "[ShootoutTests chameneosredux: 2600000 to: stream]." - [ShootoutTests threadring: 100000000 to: stream] } do: + { [ShootoutTests chameneosredux: 2600000 to: stream]. + } do: [:block | | benchmark t | benchmark := (ShootoutTests selectorForSimpleBlock: block) copyUpTo: $:. "Smalltalk garbageCollect." diff --git a/SPy-Benchmarks.package/SPyRunner.class/class/runTest..st b/SPy-Benchmarks.package/SPyRunner.class/class/runTest..st new file mode 100644 --- /dev/null +++ b/SPy-Benchmarks.package/SPyRunner.class/class/runTest..st @@ -0,0 +1,10 @@ +benchmarks +runTest: aSymbol + "self runTests" + | suite | + suite := KernelTests. + suite := suite copy + tests: (suite tests + select: [ :eachTestCase | + eachTestCase selector = aSymbol]). + ^ suite run asString \ No newline at end of file diff --git a/SPy-Benchmarks.package/SPyRunner.class/methodProperties.json b/SPy-Benchmarks.package/SPyRunner.class/methodProperties.json --- a/SPy-Benchmarks.package/SPyRunner.class/methodProperties.json +++ b/SPy-Benchmarks.package/SPyRunner.class/methodProperties.json @@ -1,9 +1,13 @@ { "class" : { - "format:" : "lw 4/29/2013 17:13", + "format:" : "lw 6/17/2013 19:26", + "initialize" : "lw 6/26/2013 16:07", + "kernelTests" : "lw 6/26/2013 16:01", + "nonDestroyingTests" : "lw 6/26/2013 17:04", "run" : "lw 4/29/2013 17:51", - "runKernelTests" : "lw 5/30/2013 18:03", - "runShootout" : "lw 5/3/2013 14:43", + "runKernelTests" : "lw 6/17/2013 13:31", + "runShootout" : "lw 6/27/2013 16:03", + "runTest:" : "lw 6/26/2013 16:06", "runTinyBenchmarks" : "lw 4/29/2013 17:39" }, "instance" : { } } diff --git a/SPy-Benchmarks.package/SPyRunner.class/properties.json b/SPy-Benchmarks.package/SPyRunner.class/properties.json --- a/SPy-Benchmarks.package/SPyRunner.class/properties.json +++ b/SPy-Benchmarks.package/SPyRunner.class/properties.json @@ -3,7 +3,7 @@ "classinstvars" : [ ], "classvars" : [ - ], + "KernelTests" ], "commentStamp" : "", "instvars" : [ ], diff --git a/SPy-Benchmarks.package/TestSuite.extension/instance/run..st b/SPy-Benchmarks.package/TestSuite.extension/instance/run..st --- a/SPy-Benchmarks.package/TestSuite.extension/instance/run..st +++ b/SPy-Benchmarks.package/TestSuite.extension/instance/run..st @@ -2,5 +2,5 @@ run: aResult self tests do: [:each | self changed: each. - SPyVM print: 'Running Test ', (each asString padded: #right to: 62 with: $ ), String tab, '(', aResult asString, ')'. + SPyVM print: 'Running Test ', each asString. each run: aResult]. 
\ No newline at end of file diff --git a/SPy-Benchmarks.package/TestSuite.extension/methodProperties.json b/SPy-Benchmarks.package/TestSuite.extension/methodProperties.json --- a/SPy-Benchmarks.package/TestSuite.extension/methodProperties.json +++ b/SPy-Benchmarks.package/TestSuite.extension/methodProperties.json @@ -2,5 +2,5 @@ "class" : { }, "instance" : { - "run:" : "lw 5/30/2013 11:19", + "run:" : "lw 6/18/2013 10:13", "tests:" : "lw 5/29/2013 20:14" } } diff --git a/SPy-Benchmarks.package/monticello.meta/version b/SPy-Benchmarks.package/monticello.meta/version --- a/SPy-Benchmarks.package/monticello.meta/version +++ b/SPy-Benchmarks.package/monticello.meta/version @@ -1,1 +1,1 @@ -(name 'SPy-Benchmarks-lw.6' message 'added testing messages and modified TestSuite to print stuff' id '72f3d7a3-5e09-43e5-a783-fb7c29117a52' date '31 May 2013' time '9:35:44.102 am' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.5' message 'added another benchmark' id 'cfe2797f-9dd9-4073-aa6e-86cda0ba3dbf' date '3 May 2013' time '2:43:39.36 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.4' message 'changed the test running and collecting to work with the current spy vm removed two of the shootout tests due to failure on spy' id '9d1c1e0a-0209-45d3-8e0a-220919ab5701' date '29 April 2013' time '6:07:26.686 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.3' message 'added tiny benchmarks' id 'c8214449-4009-4a64-8284-3c58395fe2bc' date '29 April 2013' time '2:15:43.242 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.2' message 'second try for an initial commit with shootout tests' id 'e538d5dc-ff13-4753-a166-bb95af0c7e0b' date '29 April 2013' time '1:41:50.098 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.1' message 'initial commit with existing Shootout tests' id '67ba6a6a-5476-4dc0-892f-de76933491e8' date '29 April 2013' time '1:40:20.34 pm' author 'lw' ancestors () stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ()) \ No newline at end of file +(name 'SPy-Benchmarks-lw.7' message 'refactored kernel tests' id '0130d8ee-c75a-478c-93ca-46e55b658f2e' date '8 July 2013' time '11:15:47.373 am' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.6' message 'added testing messages and modified TestSuite to print stuff' id '72f3d7a3-5e09-43e5-a783-fb7c29117a52' date '31 May 2013' time '9:35:44.102 am' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.5' message 'added another benchmark' id 'cfe2797f-9dd9-4073-aa6e-86cda0ba3dbf' date '3 May 2013' time '2:43:39.36 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.4' message 'changed the test running and collecting to work with the current spy vm removed two of the shootout tests due to failure on spy' id '9d1c1e0a-0209-45d3-8e0a-220919ab5701' date '29 April 2013' time '6:07:26.686 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.3' message 'added tiny benchmarks' id 'c8214449-4009-4a64-8284-3c58395fe2bc' date '29 April 2013' time '2:15:43.242 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.2' message 'second try for an initial commit with shootout tests' id 'e538d5dc-ff13-4753-a166-bb95af0c7e0b' date '29 April 2013' time '1:41:50.098 pm' author 'lw' ancestors ((name 'SPy-Benchmarks-lw.1' message 'initial commit with existing Shootout tests' id '67ba6a6a-5476-4dc0-892f-de76933491e8' date '29 April 2013' time '1:40:20.34 pm' author 'lw' ancestors () stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ())) stepChildren ()) \ No newline at end of 
file From noreply at buildbot.pypy.org Tue Jul 9 20:21:42 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 9 Jul 2013 20:21:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added version object to the image, thus allowing in startup to decide whether to have a lower bound for artifical processes priority or not Message-ID: <20130709182142.38C7C1C3366@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r493:6c97c6bc7fc1 Date: 2013-07-09 20:20 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6c97c6bc7fc1/ Log: added version object to the image, thus allowing in startup to decide whether to have a lower bound for artifical processes priority or not diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -374,6 +374,7 @@ self.w_asSymbol = self.find_symbol(space, reader, "asSymbol") self.w_simulateCopyBits = self.find_symbol(space, reader, "simulateCopyBits") self.lastWindowSize = reader.lastWindowSize + self.version = reader.version def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -32,7 +32,8 @@ # third variable is priority priority = space.unwrap_int(w_hpp.fetch(space, 2)) / 2 + 1 # Priorities below 10 are not allowed in newer versions of Squeak. - priority = max(11, priority) + if interp.image.version.has_closures: + priority = max(11, priority) w_benchmark_proc.store(space, 2, space.wrap_int(priority)) # make process eligible for scheduling From noreply at buildbot.pypy.org Tue Jul 9 20:21:43 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 9 Jul 2013 20:21:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added SmallInteger>>#benchmarkExampleOne and prepared changing the copybits primitive number from 96 to the one for external call (117) Message-ID: <20130709182143.7C6EF1C3366@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r494:acba2d191ee3 Date: 2013-07-09 20:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/acba2d191ee3/ Log: added SmallInteger>>#benchmarkExampleOne and prepared changing the copybits primitive number from 96 to the one for external call (117) diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index e14767581ade29ce25d0c0fa8bdd3aef9ebc6a51..9171a81055a32b5c93cf278576525da9af88c942 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Jul 9 21:36:24 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 9 Jul 2013 21:36:24 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: merge default Message-ID: <20130709193624.C8AD61C3366@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65302:cf4e3cb3bd79 Date: 2013-07-09 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/cf4e3cb3bd79/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as 
follows: The MIT License diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture. + +* Various numpy improvements. + +* Bugfixes to cffi and ctypes. + +* Bugfixes to the stacklet support + +* Improved logging performance + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. 
_`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -708,7 +708,7 @@ return MapDictIteratorValues(self.space, self, w_dict) def iteritems(self, w_dict): return MapDictIteratorItems(self.space, self, w_dict) - + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() @@ -716,69 +716,69 @@ _become(obj, new_obj) class MapDictIteratorKeys(BaseKeyIterator): - def __init__(self, space, strategy, dictimplementation): - BaseKeyIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseKeyIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_key_entry(self): - implementation = self.dictimplementation - assert 
isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr - return None + def next_key_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr + return None + class MapDictIteratorValues(BaseValueIterator): - def __init__(self, space, strategy, dictimplementation): - BaseValueIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseValueIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_value_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - return self.w_obj.getdictvalue(self.space, attr) - return None + def next_value_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + return self.w_obj.getdictvalue(self.space, attr) + return None + class MapDictIteratorItems(BaseItemIterator): - def __init__(self, space, strategy, dictimplementation): - BaseItemIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseItemIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_item_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None, None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr, self.w_obj.getdictvalue(self.space, attr) - return None, None + def next_item_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None, None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) 
+ return w_attr, self.w_obj.getdictvalue(self.space, attr) + return None, None + # ____________________________________________________________ # Magic caching @@ -860,7 +860,7 @@ # selector = ("", INVALID) if w_descr is None: - selector = (name, DICT) #common case: no such attr in the class + selector = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, TypeCell): pass # we have a TypeCell in the class: give up elif space.is_data_descr(w_descr): @@ -890,7 +890,6 @@ LOAD_ATTR_slowpath._dont_inline_ = True def LOOKUP_METHOD_mapdict(f, nameindex, w_obj): - space = f.space pycode = f.getcode() entry = pycode._mapdict_caches[nameindex] if entry.is_valid_for_obj(w_obj): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,6 +740,34 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) + def test_union_type_some_opbc(self): + class A(object): + name = "A" + + def f(self): + return type(self) + + class B(A): + name = "B" + + def f(tp): + return tp + + def main(n): + if n: + if n == 1: + inst = A() + else: + inst = B() + arg = inst.f() + else: + arg = B + return f(arg).name + + a = self.RPythonAnnotator() + s = a.build_types(main, [int]) + assert isinstance(s, annmodel.SomeString) + def test_ann_assert(self): def assert_(x): assert x,"XXX" From noreply at buildbot.pypy.org Tue Jul 9 21:36:26 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 9 Jul 2013 21:36:26 +0200 (CEST) Subject: [pypy-commit] pypy improve-str2charp: close about-to-be-merged branch Message-ID: <20130709193626.27AF21C3366@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: improve-str2charp Changeset: r65303:faaf5e9bdce2 Date: 2013-07-09 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/faaf5e9bdce2/ Log: close about-to-be-merged branch From noreply at buildbot.pypy.org Tue Jul 9 21:36:27 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 9 Jul 2013 21:36:27 +0200 (CEST) Subject: [pypy-commit] pypy default: merge the improve-str2charp branch: now operations like str2charp and get_nonmovingbuffer are implemented by calling memcpy instead of copying char-by-char explicitly. The results is that str2charp is now 31x faster (31, it's not a typo) and I/O bound benchmarks like twisted_tcp and raytrace-simple are ~15% faster Message-ID: <20130709193627.6C3631C3366@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65304:cffbba0d1078 Date: 2013-07-09 21:35 +0200 http://bitbucket.org/pypy/pypy/changeset/cffbba0d1078/ Log: merge the improve-str2charp branch: now operations like str2charp and get_nonmovingbuffer are implemented by calling memcpy instead of copying char-by-char explicitly. 
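A rough, self-contained sketch of the difference between copying char-by-char and doing one memcpy-style copy of a string into a raw NUL-terminated buffer: it uses plain ctypes with invented function names, and is not the RPython code from the branch (that code is in the diff below).

    import ctypes

    def copy_char_by_char(s):
        # one buffer store per character: what the log calls
        # "copying char-by-char explicitly"
        buf = ctypes.create_string_buffer(len(s) + 1)  # zero-filled, so already NUL-terminated
        for i in range(len(s)):
            buf[i] = s[i]
        return buf

    def copy_with_memmove(s):
        # a single bulk copy, analogous to the raw_memcopy() performed by
        # the new copy_string_to_raw() helper
        buf = ctypes.create_string_buffer(len(s) + 1)
        ctypes.memmove(buf, s, len(s))
        return buf

    assert copy_char_by_char("hello").value == copy_with_memmove("hello").value == "hello"

With a moving GC the real implementation also has to keep the source string alive and unmoved while the raw copy runs, which is what the keepalive_until_here() calls in the diff are for.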
The result is that str2charp is now 31x faster (31, it's not a typo) and I/O bound benchmarks like twisted_tcp and raytrace-simple are ~15% faster diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -79,6 +79,9 @@ # OS_RAW_MALLOC_VARSIZE_CHAR = 110 OS_RAW_FREE = 111 + # + OS_STR_COPY_TO_RAW = 112 + OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1665,12 +1665,14 @@ dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, "stroruni.slice": EffectInfo.OS_STR_SLICE, "stroruni.equal": EffectInfo.OS_STR_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_STR_COPY_TO_RAW, } CHR = lltype.Char elif SoU.TO == rstr.UNICODE: dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, "stroruni.slice": EffectInfo.OS_UNI_SLICE, "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar else: diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -677,7 +677,8 @@ def make_string_mappings(strtype): if strtype is str: - from rpython.rtyper.lltypesystem.rstr import STR as STRTYPE + from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, + copy_string_to_raw) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -685,7 +686,9 @@ lastchar = '\x00' builder_class = StringBuilder else: - from rpython.rtyper.lltypesystem.rstr import UNICODE as STRTYPE + from rpython.rtyper.lltypesystem.rstr import ( + UNICODE as STRTYPE, + copy_unicode_to_raw as copy_string_to_raw) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -702,11 +705,9 @@ else: array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw', track_allocation=False) i = len(s) + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, 0, i) array[i] = lastchar - i -= 1 - while i >= 0: - array[i] = s[i] - i -= 1 return array str2charp._annenforceargs_ = [strtype, bool] @@ -739,14 +740,14 @@ string is already nonmovable. Must be followed by a free_nonmovingbuffer call.
""" + lldata = llstrtype(data) if rgc.can_move(data): count = len(data) buf = lltype.malloc(TYPEP.TO, count, flavor='raw') - for i in range(count): - buf[i] = data[i] + copy_string_to_raw(lldata, buf, 0, count) return buf else: - data_start = cast_ptr_to_adr(llstrtype(data)) + \ + data_start = cast_ptr_to_adr(lldata) + \ offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0) return cast(TYPEP, data_start) get_nonmovingbuffer._annenforceargs_ = [strtype] diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -49,16 +49,18 @@ def emptyunicodefun(): return emptyunicode -def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name): - def _str_ofs_src(item): - return (llmemory.offsetof(SRC_TP, 'chars') + - llmemory.itemoffsetof(SRC_TP.chars, 0) + +def _new_copy_contents_fun(STR_TP, CHAR_TP, name): + def _str_ofs(item): + return (llmemory.offsetof(STR_TP, 'chars') + + llmemory.itemoffsetof(STR_TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - def _str_ofs_dst(item): - return (llmemory.offsetof(DST_TP, 'chars') + - llmemory.itemoffsetof(DST_TP.chars, 0) + - llmemory.sizeof(CHAR_TP) * item) + @signature(types.any(), types.int(), returns=types.any()) + def _get_raw_buf(src, ofs): + assert typeOf(src).TO == STR_TP + assert ofs >= 0 + return llmemory.cast_ptr_to_adr(src) + _str_ofs(ofs) + _get_raw_buf._always_inline_ = True @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) @@ -71,22 +73,38 @@ # because it might move the strings. The keepalive_until_here() # are obscurely essential to make sure that the strings stay alive # longer than the raw_memcopy(). - assert typeOf(src).TO == SRC_TP - assert typeOf(dst).TO == DST_TP - assert srcstart >= 0 - assert dststart >= 0 assert length >= 0 - src = llmemory.cast_ptr_to_adr(src) + _str_ofs_src(srcstart) - dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs_dst(dststart) + src = _get_raw_buf(src, srcstart) + dst = _get_raw_buf(dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True - return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) + copy_string_contents = func_with_new_name(copy_string_contents, + 'copy_%s_contents' % name) -copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, - 'unicode') + @jit.oopspec('stroruni.copy_string_to_raw(src, ptrdst, srcstart, length)') + def copy_string_to_raw(src, ptrdst, srcstart, length): + """ + Copies 'length' characters from the 'src' string to the 'ptrdst' + buffer, starting at position 'srcstart'. + 'ptrdst' must be a non-gc Array of Char. 
+ """ + # xxx Warning: same note as above apply: don't do this at home + assert length >= 0 + src = _get_raw_buf(src, srcstart) + adr = llmemory.cast_ptr_to_adr(ptrdst) + dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) + llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) + keepalive_until_here(src) + copy_string_to_raw._always_inline_ = True + copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) + + return copy_string_to_raw, copy_string_contents + +copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') +copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, + UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() CONST_UNICODE_CACHE = WeakValueDictionary() diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1118,6 +1118,26 @@ res = self.interpret(f, [5]) assert res == 0 + def test_copy_string_to_raw(self): + from rpython.rtyper.lltypesystem import lltype, llmemory + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + + def f(buf, n): + s = 'abc' * n + ll_s = llstr(s) + copy_string_to_raw(ll_s, buf, 0, n*3) + + TP = lltype.Array(lltype.Char) + array = lltype.malloc(TP, 12, flavor='raw') + f(array, 4) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') + + array = lltype.malloc(TP, 12, flavor='raw') + self.interpret(f, [array, 4]) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') class TestOOtype(BaseTestRstr, OORtypeMixin): pass From noreply at buildbot.pypy.org Tue Jul 9 21:44:47 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 9 Jul 2013 21:44:47 +0200 (CEST) Subject: [pypy-commit] pypy default: (fijal) add comments to make explicit which sections cannot contain any GC operation Message-ID: <20130709194447.85F9E1C3364@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65305:f4668ca77047 Date: 2013-07-09 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/f4668ca77047/ Log: (fijal) add comments to make explicit which sections cannot contain any GC operation diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -74,9 +74,11 @@ # are obscurely essential to make sure that the strings stay alive # longer than the raw_memcopy(). 
assert length >= 0 + # from here, no GC operations can happen src = _get_raw_buf(src, srcstart) dst = _get_raw_buf(dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True @@ -92,10 +94,12 @@ """ # xxx Warning: same note as above apply: don't do this at home assert length >= 0 + # from here, no GC operations can happen src = _get_raw_buf(src, srcstart) adr = llmemory.cast_ptr_to_adr(ptrdst) dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section keepalive_until_here(src) copy_string_to_raw._always_inline_ = True copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) From noreply at buildbot.pypy.org Tue Jul 9 22:09:33 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 9 Jul 2013 22:09:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130709200933.99A661C3364@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65306:f4bbbcaa8ea3 Date: 2013-07-09 13:08 -0700 http://bitbucket.org/pypy/pypy/changeset/f4bbbcaa8ea3/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture. + +* Various numpy improvements. + +* Bugfixes to cffi and ctypes. + +* Bugfixes to the stacklet support + +* Improved logging performance + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. 
``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -359,7 +359,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -432,21 +432,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -609,6 +609,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ 
b/pypy/objspace/std/mapdict.py @@ -706,7 +706,7 @@ return MapDictIteratorValues(self.space, self, w_dict) def iteritems(self, w_dict): return MapDictIteratorItems(self.space, self, w_dict) - + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() @@ -714,69 +714,69 @@ _become(obj, new_obj) class MapDictIteratorKeys(BaseKeyIterator): - def __init__(self, space, strategy, dictimplementation): - BaseKeyIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseKeyIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_key_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr - return None + def next_key_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr + return None + class MapDictIteratorValues(BaseValueIterator): - def __init__(self, space, strategy, dictimplementation): - BaseValueIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseValueIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_value_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - return self.w_obj.getdictvalue(self.space, attr) - return None + def next_value_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + return self.w_obj.getdictvalue(self.space, attr) + return None + class MapDictIteratorItems(BaseItemIterator): - def __init__(self, space, strategy, dictimplementation): - BaseItemIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseItemIterator.__init__(self, space, strategy, dictimplementation) + w_obj = 
strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_item_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None, None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr, self.w_obj.getdictvalue(self.space, attr) - return None, None + def next_item_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None, None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr, self.w_obj.getdictvalue(self.space, attr) + return None, None + # ____________________________________________________________ # Magic caching @@ -858,7 +858,7 @@ # selector = ("", INVALID) if w_descr is None: - selector = (name, DICT) #common case: no such attr in the class + selector = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, TypeCell): pass # we have a TypeCell in the class: give up elif space.is_data_descr(w_descr): @@ -888,7 +888,6 @@ LOAD_ATTR_slowpath._dont_inline_ = True def LOOKUP_METHOD_mapdict(f, nameindex, w_obj): - space = f.space pycode = f.getcode() entry = pycode._mapdict_caches[nameindex] if entry.is_valid_for_obj(w_obj): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,6 +740,34 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) + def test_union_type_some_opbc(self): + class A(object): + name = "A" + + def f(self): + return type(self) + + class B(A): + name = "B" + + def f(tp): + return tp + + def main(n): + if n: + if n == 1: + inst = A() + else: + inst = B() + arg = inst.f() + else: + arg = B + return f(arg).name + + a = self.RPythonAnnotator() + s = a.build_types(main, [int]) + assert isinstance(s, annmodel.SomeString) + def test_ann_assert(self): def assert_(x): assert x,"XXX" diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -79,6 +79,9 @@ # OS_RAW_MALLOC_VARSIZE_CHAR = 110 OS_RAW_FREE = 111 + # + OS_STR_COPY_TO_RAW = 112 + OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1665,12 +1665,14 @@ dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, "stroruni.slice": EffectInfo.OS_STR_SLICE, "stroruni.equal": EffectInfo.OS_STR_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_STR_COPY_TO_RAW, } CHR = lltype.Char elif SoU.TO == rstr.UNICODE: dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, "stroruni.slice": EffectInfo.OS_UNI_SLICE, "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar else: diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- 
a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -677,7 +677,8 @@ def make_string_mappings(strtype): if strtype is str: - from rpython.rtyper.lltypesystem.rstr import STR as STRTYPE + from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, + copy_string_to_raw) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -685,7 +686,9 @@ lastchar = '\x00' builder_class = StringBuilder else: - from rpython.rtyper.lltypesystem.rstr import UNICODE as STRTYPE + from rpython.rtyper.lltypesystem.rstr import ( + UNICODE as STRTYPE, + copy_unicode_to_raw as copy_string_to_raw) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -702,11 +705,9 @@ else: array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw', track_allocation=False) i = len(s) + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, 0, i) array[i] = lastchar - i -= 1 - while i >= 0: - array[i] = s[i] - i -= 1 return array str2charp._annenforceargs_ = [strtype, bool] @@ -739,14 +740,14 @@ string is already nonmovable. Must be followed by a free_nonmovingbuffer call. """ + lldata = llstrtype(data) if rgc.can_move(data): count = len(data) buf = lltype.malloc(TYPEP.TO, count, flavor='raw') - for i in range(count): - buf[i] = data[i] + copy_string_to_raw(lldata, buf, 0, count) return buf else: - data_start = cast_ptr_to_adr(llstrtype(data)) + \ + data_start = cast_ptr_to_adr(lldata) + \ offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0) return cast(TYPEP, data_start) get_nonmovingbuffer._annenforceargs_ = [strtype] diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -49,16 +49,18 @@ def emptyunicodefun(): return emptyunicode -def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name): - def _str_ofs_src(item): - return (llmemory.offsetof(SRC_TP, 'chars') + - llmemory.itemoffsetof(SRC_TP.chars, 0) + +def _new_copy_contents_fun(STR_TP, CHAR_TP, name): + def _str_ofs(item): + return (llmemory.offsetof(STR_TP, 'chars') + + llmemory.itemoffsetof(STR_TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - def _str_ofs_dst(item): - return (llmemory.offsetof(DST_TP, 'chars') + - llmemory.itemoffsetof(DST_TP.chars, 0) + - llmemory.sizeof(CHAR_TP) * item) + @signature(types.any(), types.int(), returns=types.any()) + def _get_raw_buf(src, ofs): + assert typeOf(src).TO == STR_TP + assert ofs >= 0 + return llmemory.cast_ptr_to_adr(src) + _str_ofs(ofs) + _get_raw_buf._always_inline_ = True @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) @@ -71,22 +73,42 @@ # because it might move the strings. The keepalive_until_here() # are obscurely essential to make sure that the strings stay alive # longer than the raw_memcopy(). 
- assert typeOf(src).TO == SRC_TP - assert typeOf(dst).TO == DST_TP - assert srcstart >= 0 - assert dststart >= 0 assert length >= 0 - src = llmemory.cast_ptr_to_adr(src) + _str_ofs_src(srcstart) - dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs_dst(dststart) + # from here, no GC operations can happen + src = _get_raw_buf(src, srcstart) + dst = _get_raw_buf(dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True - return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) + copy_string_contents = func_with_new_name(copy_string_contents, + 'copy_%s_contents' % name) -copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, - 'unicode') + @jit.oopspec('stroruni.copy_string_to_raw(src, ptrdst, srcstart, length)') + def copy_string_to_raw(src, ptrdst, srcstart, length): + """ + Copies 'length' characters from the 'src' string to the 'ptrdst' + buffer, starting at position 'srcstart'. + 'ptrdst' must be a non-gc Array of Char. + """ + # xxx Warning: same note as above apply: don't do this at home + assert length >= 0 + # from here, no GC operations can happen + src = _get_raw_buf(src, srcstart) + adr = llmemory.cast_ptr_to_adr(ptrdst) + dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) + llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section + keepalive_until_here(src) + copy_string_to_raw._always_inline_ = True + copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) + + return copy_string_to_raw, copy_string_contents + +copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') +copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, + UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() CONST_UNICODE_CACHE = WeakValueDictionary() diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1118,6 +1118,26 @@ res = self.interpret(f, [5]) assert res == 0 + def test_copy_string_to_raw(self): + from rpython.rtyper.lltypesystem import lltype, llmemory + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + + def f(buf, n): + s = 'abc' * n + ll_s = llstr(s) + copy_string_to_raw(ll_s, buf, 0, n*3) + + TP = lltype.Array(lltype.Char) + array = lltype.malloc(TP, 12, flavor='raw') + f(array, 4) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') + + array = lltype.malloc(TP, 12, flavor='raw') + self.interpret(f, [array, 4]) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') class TestOOtype(BaseTestRstr, OORtypeMixin): pass From noreply at buildbot.pypy.org Wed Jul 10 06:13:42 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 10 Jul 2013 06:13:42 +0200 (CEST) Subject: [pypy-commit] pypy default: A failing test for a dict where a key is a PBC. Message-ID: <20130710041342.D9D971C3364@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65307:c10c53a1bde6 Date: 2013-07-10 08:56 +1000 http://bitbucket.org/pypy/pypy/changeset/c10c53a1bde6/ Log: A failing test for a dict where a key is a PBC. 
diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -496,6 +496,29 @@ res = self.interpret(f, [6]) assert res == 0 + def test_cls_dict(self): + class A(object): + pass + + class B(A): + pass + + def f(i): + d = { + A: 3, + B: 4, + } + if i: + cls = A + else: + cls = B + return d[cls] + + res = self.interpret(f, [1]) + assert res == 3 + res = self.interpret(f, [0]) + assert res == 4 + def test_access_in_try(self): def f(d): try: From noreply at buildbot.pypy.org Wed Jul 10 06:13:44 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 10 Jul 2013 06:13:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Corrected a bunch of code that wasn't using 4-space indents. Message-ID: <20130710041344.325711C3367@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65308:719618bd0eac Date: 2013-07-10 09:20 +1000 http://bitbucket.org/pypy/pypy/changeset/719618bd0eac/ Log: Corrected a bunch of code that wasn't using 4-space indents. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" - assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" 
+ """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. 
+ builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -86,20 +86,20 @@ (not _MS_WINDOWS or fieldsize * 8 == last_size) and fieldsize * 8 <= last_size and bitoffset + bitsize <= last_size): - # continue bit field - field_type = CONT_BITFIELD + # continue bit field + field_type = CONT_BITFIELD elif (not _MS_WINDOWS and last_size and # we have a bitfield open fieldsize * 8 >= last_size and bitoffset + bitsize <= fieldsize * 8): - # expand bit field - field_type = EXPAND_BITFIELD + # expand bit field + field_type = EXPAND_BITFIELD else: - # start new bitfield - field_type = NEW_BITFIELD - has_bitfield = True - bitoffset = 0 - last_size = fieldsize * 8 + # start new bitfield + field_type = NEW_BITFIELD + has_bitfield = True + bitoffset = 0 + last_size = fieldsize * 8 if is_union: pos.append(0) diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/cppyy/bench/hsimple.py --- a/pypy/module/cppyy/bench/hsimple.py +++ b/pypy/module/cppyy/bench/hsimple.py @@ -37,7 +37,7 @@ import random if _reflex: - gROOT.SetBatch(True) + gROOT.SetBatch(True) # Create a new ROOT binary machine independent file. # Note that this file may contain any kind of ROOT objects, histograms, diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/cppyy/bench/hsimple_rflx.py --- a/pypy/module/cppyy/bench/hsimple_rflx.py +++ b/pypy/module/cppyy/bench/hsimple_rflx.py @@ -80,14 +80,14 @@ for i in xrange(2500000): # Generate random values. # px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) + px, py = random.Gaus(0, 1), random.Gaus(0, 1) # pt = (px*px + py*py)**0.5 - pt = math.sqrt(px*px + py*py) + pt = math.sqrt(px*px + py*py) # pt = (px*px + py*py) # random = rndm(1) # Fill histograms. - hpx.Fill(pt) + hpx.Fill(pt) # hpxpyFill( px, py ) # hprofFill( px, pz ) # ntupleFill( px, py, pz, random, i ) @@ -105,7 +105,7 @@ #gBenchmark.Show( 'hsimple' ) -hpx.Print() +hpx.Print() # Save all objects in this file. 
#hpx.SetFillColor( 0 ) diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -225,9 +225,9 @@ if w_newpart is None or not PyString_Check(space, ref[0]) or \ not PyString_Check(space, w_newpart): - Py_DecRef(space, ref[0]) - ref[0] = lltype.nullptr(PyObject.TO) - return + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) Py_DecRef(space, ref[0]) diff --git a/pypy/module/parser/__init__.py b/pypy/module/parser/__init__.py --- a/pypy/module/parser/__init__.py +++ b/pypy/module/parser/__init__.py @@ -2,28 +2,27 @@ class Module(MixedModule): - """The builtin parser module.""" + """The builtin parser module.""" - applevel_name = 'parser' + applevel_name = 'parser' - appleveldefs = { - } + appleveldefs = { + } - interpleveldefs = { - '__name__' : '(space.wrap("parser"))', - '__doc__' : '(space.wrap("parser module"))', - - 'suite' : 'pyparser.suite', - 'expr' : 'pyparser.expr', - 'issuite' : 'pyparser.issuite', - 'isexpr' : 'pyparser.isexpr', - 'STType' : 'pyparser.W_STType', - 'ast2tuple' : 'pyparser.st2tuple', - 'st2tuple' : 'pyparser.st2tuple', - 'ast2list' : 'pyparser.st2list', - 'ast2tuple' : 'pyparser.st2tuple', - 'ASTType' : 'pyparser.W_STType', - 'compilest' : 'pyparser.compilest', - 'compileast' : 'pyparser.compilest', - 'ParserError' : 'space.new_exception_class("parser.ParserError")', - } + interpleveldefs = { + '__name__' : '(space.wrap("parser"))', + '__doc__' : '(space.wrap("parser module"))', + 'suite' : 'pyparser.suite', + 'expr' : 'pyparser.expr', + 'issuite' : 'pyparser.issuite', + 'isexpr' : 'pyparser.isexpr', + 'STType' : 'pyparser.W_STType', + 'ast2tuple' : 'pyparser.st2tuple', + 'st2tuple' : 'pyparser.st2tuple', + 'ast2list' : 'pyparser.st2list', + 'ast2tuple' : 'pyparser.st2tuple', + 'ASTType' : 'pyparser.W_STType', + 'compilest' : 'pyparser.compilest', + 'compileast' : 'pyparser.compilest', + 'ParserError' : 'space.new_exception_class("parser.ParserError")', + } diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -146,9 +146,9 @@ for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: - for op in chunk.operations: - if op.name == 'label': - yield op + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -209,12 +209,12 @@ tzname = ["", ""] if _WIN: - c_tzset() - timezone = c_get_timezone() - altzone = timezone - 3600 - daylight = c_get_daylight() - tzname_ptr = c_get_tzname() - tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) + c_tzset() + timezone = c_get_timezone() + altzone = timezone - 3600 + daylight = c_get_daylight() + tzname_ptr = c_get_tzname() + tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) if _POSIX: if _CYGWIN: diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -379,13 +379,13 @@ if _check_notimplemented(space, w_res): return w_res if w_right_impl is not None: - if space.is_w(w_obj3, 
space.w_None): - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) - else: - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, + if space.is_w(w_obj3, space.w_None): + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) + else: + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, w_obj3) - if _check_notimplemented(space, w_res): - return w_res + if _check_notimplemented(space, w_res): + return w_res raise OperationError(space.w_TypeError, space.wrap("operands do not support **")) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -539,11 +539,11 @@ if left: #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, u_self[lpos],w_chars) while lpos < rpos and u_self[lpos].isspace(): - lpos += 1 + lpos += 1 if right: while rpos > lpos and u_self[rpos - 1].isspace(): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return sliced(space, u_self, lpos, rpos, w_self) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -359,11 +359,11 @@ if left: while lpos < rpos and u_self[lpos] in u_chars: - lpos += 1 + lpos += 1 if right: while rpos > lpos and u_self[rpos - 1] in u_chars: - rpos -= 1 + rpos -= 1 assert rpos >= 0 result = u_self[lpos: rpos] diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -177,8 +177,8 @@ r.const = isinstance(s_obj.const, typ) elif our_issubclass(s_obj.knowntype, typ): if not s_obj.can_be_none(): - r.const = True - elif not our_issubclass(typ, s_obj.knowntype): + r.const = True + elif not our_issubclass(typ, s_obj.knowntype): r.const = False elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation # from bool to int, notice that isinstance( , bool|int) @@ -207,12 +207,12 @@ r.const = hasattr(s_obj.const, s_attr.const) elif (isinstance(s_obj, SomePBC) and s_obj.getKind() is description.FrozenDesc): - answers = {} - for d in s_obj.descriptions: - answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue) - answers[answer] = True - if len(answers) == 1: - r.const, = answers + answers = {} + for d in s_obj.descriptions: + answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue) + answers[answer] = True + if len(answers) == 1: + r.const, = answers return r ##def builtin_callable(s_obj): @@ -344,7 +344,7 @@ return SomeAddress() def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" def test(*args): return s_Bool @@ -395,7 +395,7 @@ if hasattr(object.__init__, 'im_func'): BUILTIN_ANALYZERS[object.__init__.im_func] = object_init else: - BUILTIN_ANALYZERS[object.__init__] = object_init + BUILTIN_ANALYZERS[object.__init__] = object_init # import BUILTIN_ANALYZERS[__import__] = import_func @@ -549,12 +549,12 @@ return s_Bool def classof(i): - assert isinstance(i, SomeOOInstance) + assert isinstance(i, SomeOOInstance) return SomeOOClass(i.ootype) def subclassof(class1, class2): - assert isinstance(class1, SomeOOClass) - assert isinstance(class2, SomeOOClass) + assert isinstance(class1, SomeOOClass) + assert isinstance(class2, SomeOOClass) return s_Bool def runtimenew(c): diff --git 
a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,14 +5,12 @@ from __future__ import absolute_import from types import MethodType -from rpython.annotator.model import \ - SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, \ - SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, \ - SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, \ - SomeTypedAddressAccess, SomeAddress, SomeType, \ - s_ImpossibleValue, s_Bool, s_None, \ - unionof, missing_operation, add_knowntypedata, HarmlesslyBlocked, \ - SomeWeakRef, SomeUnicodeString +from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, + SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, + SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, + SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? @@ -39,7 +37,7 @@ def type(obj, *moreargs): if moreargs: - raise Exception, 'type() called with more than one argument' + raise Exception('type() called with more than one argument') r = SomeType() bk = getbookkeeper() op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=obj) diff --git a/rpython/jit/backend/arm/test/test_instr_codebuilder.py b/rpython/jit/backend/arm/test/test_instr_codebuilder.py --- a/rpython/jit/backend/arm/test/test_instr_codebuilder.py +++ b/rpython/jit/backend/arm/test/test_instr_codebuilder.py @@ -76,16 +76,16 @@ self.assert_equal('ASR r7, r5, #24') def test_orr_rr_no_shift(self): - self.cb.ORR_rr(r.r0.value, r.r7.value,r.r12.value) + self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value) self.assert_equal('ORR r0, r7, r12') def test_orr_rr_lsl_8(self): - self.cb.ORR_rr(r.r0.value, r.r7.value,r.r12.value, 8) + self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value, 8) self.assert_equal('ORR r0, r7, r12, lsl #8') def test_push_one_reg(self): if get_as_version() < (2, 23): - py.test.xfail("GNU as before version 2.23 generates encoding A1 for " + py.test.xfail("GNU as before version 2.23 generates encoding A1 for " "pushing only one register") self.cb.PUSH([r.r1.value]) self.assert_equal('PUSH {r1}') diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -374,7 +374,7 @@ nonconstbox = clonebox def getref_base(self): - return self.value + return self.value def getref(self, OBJ): return ootype.cast_from_object(OBJ, self.getref_base()) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -183,8 +183,8 @@ if snapshot is None: return lltype.nullptr(NUMBERING), {}, 0 if snapshot in self.numberings: - numb, liveboxes, v = self.numberings[snapshot] - return numb, liveboxes.copy(), v + numb, liveboxes, v = self.numberings[snapshot] + return numb, liveboxes.copy(), v numb1, liveboxes, v = self.number(optimizer, snapshot.prev) n = len(liveboxes) - v diff --git a/rpython/jit/metainterp/test/test_greenfield.py b/rpython/jit/metainterp/test/test_greenfield.py --- a/rpython/jit/metainterp/test/test_greenfield.py +++ 
b/rpython/jit/metainterp/test/test_greenfield.py @@ -56,5 +56,6 @@ class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): pass + class TestOOtypeGreenFieldsTests(GreenFieldsTests, OOJitMixin): - pass + pass diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -178,5 +178,6 @@ class TestLLtypeImmutableFieldsTests(ImmutableFieldsTests, LLJitMixin): pass + class TestOOtypeImmutableFieldsTests(ImmutableFieldsTests, OOJitMixin): - pass + pass diff --git a/rpython/jit/tl/test/test_tl.py b/rpython/jit/tl/test/test_tl.py --- a/rpython/jit/tl/test/test_tl.py +++ b/rpython/jit/tl/test/test_tl.py @@ -79,10 +79,10 @@ ] def test_ops(self): - for insn, pyop, values in self.ops: - for first, second in values: - code = [PUSH, first, PUSH, second, insn] - assert self.interp(list2bytecode(code)) == pyop(first, second) + for insn, pyop, values in self.ops: + for first, second in values: + code = [PUSH, first, PUSH, second, insn] + assert self.interp(list2bytecode(code)) == pyop(first, second) def test_branch_forward(self): diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -468,7 +468,7 @@ JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): - if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: + if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: self.remember_young_pointer(addr_struct, newvalue) def _setup_wb(self): diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -80,10 +80,11 @@ self.ignore) def __getstate__(self): - return (self.token_regexs, self.names, self.ignore) + return (self.token_regexs, self.names, self.ignore) def __setstate__(self, args): - self.__init__(*args) + self.__init__(*args) + class DummyLexer(Lexer): def __init__(self, matcher, automaton, ignore): diff --git a/rpython/rlib/rope.py b/rpython/rlib/rope.py --- a/rpython/rlib/rope.py +++ b/rpython/rlib/rope.py @@ -79,7 +79,7 @@ def is_ascii(self): raise NotImplementedError("base class") - + def is_bytestring(self): raise NotImplementedError("base class") @@ -144,7 +144,7 @@ def __init__(self, s): assert isinstance(s, str) self.s = s - + def length(self): return len(self.s) @@ -245,7 +245,7 @@ def __init__(self, u): assert isinstance(u, unicode) self.u = u - + def length(self): return len(self.u) @@ -254,7 +254,7 @@ def is_ascii(self): return False # usually not - + def is_bytestring(self): return False @@ -415,7 +415,7 @@ def flatten_unicode(self): f = fringe(self) return u"".join([node.flatten_unicode() for node in f]) - + def hash_part(self): return self.additional_info().hash @@ -435,7 +435,7 @@ return self return rebalance([self], self.len) - + def _concat(self, other): if isinstance(other, LiteralNode): r = self.right @@ -946,7 +946,7 @@ self.index = 0 if start: self._advance_to(start) - + def _advance_to(self, index): self.index = self.iter._seekforward(index) self.node = self.iter.next() @@ -1106,7 +1106,7 @@ self.stack.pop() raise StopIteration - + def seekback(self, numchars): if numchars <= self.index: self.index -= numchars @@ -1148,7 +1148,7 @@ self.stop = self.start else: self.restart_positions = construct_restart_positions_node(sub) - + def next(self): if self.search_length == 0: if (self.stop - self.start) < 0: 
@@ -1257,20 +1257,20 @@ def strip(node, left=True, right=True, predicate=lambda i: chr(i).isspace(), *extraargs): length = node.length() - + lpos = 0 rpos = length - + if left: iter = ItemIterator(node) while lpos < rpos and predicate(iter.nextint(), *extraargs): - lpos += 1 - + lpos += 1 + if right: iter = ReverseItemIterator(node) while rpos > lpos and predicate(iter.nextint(), *extraargs): - rpos -= 1 - + rpos -= 1 + assert rpos >= lpos return getslice_one(node, lpos, rpos) strip._annspecialcase_ = "specialize:arg(3)" @@ -1501,10 +1501,10 @@ ch = ord(s[i]) i += 1 if (ch < 0x80): - # Encode ASCII + # Encode ASCII result.append(chr(ch)) continue - # Encode Latin-1 + # Encode Latin-1 result.append(chr((0xc0 | (ch >> 6)))) result.append(chr((0x80 | (ch & 0x3f)))) return "".join(result) diff --git a/rpython/rlib/rstruct/ieee.py b/rpython/rlib/rstruct/ieee.py --- a/rpython/rlib/rstruct/ieee.py +++ b/rpython/rlib/rstruct/ieee.py @@ -163,7 +163,7 @@ # Raise on overflow (in some circumstances, may want to return # infinity instead). if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") + raise OverflowError("float too large to pack in this format") # check constraints if not objectmodel.we_are_translated(): @@ -219,7 +219,7 @@ # Raise on overflow (in some circumstances, may want to return # infinity instead). if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") + raise OverflowError("float too large to pack in this format") # check constraints if not objectmodel.we_are_translated(): diff --git a/rpython/rlib/test/test_rcomplex.py b/rpython/rlib/test/test_rcomplex.py --- a/rpython/rlib/test/test_rcomplex.py +++ b/rpython/rlib/test/test_rcomplex.py @@ -22,15 +22,15 @@ ((10, -3), (-5, 7), (15, -10)), ((42, 0.3), (42, 0.3), (0, 0)) ]: - assert c.c_sub(c1, c2) == result + assert c.c_sub(c1, c2) == result def test_mul(): - for c1, c2, result in [ + for c1, c2, result in [ ((0, 0), (0, 0), (0, 0)), ((1, 0), (2, 0), (2, 0)), ((0, 3), (0, 2), (-6, 0)), ((0, -3), (-5, 0), (0, 15)), - ]: + ]: assert c.c_mul(c1, c2) == result def test_div(): @@ -65,7 +65,7 @@ struct.pack('l',int(rhs_pieces[i]))) else: rhs_pieces[i] = float(rhs_pieces[i]) - #id, fn, arg1_real, arg1_imag arg2_real, arg2_imag = + #id, fn, arg1_real, arg1_imag arg2_real, arg2_imag = #exp_real, exp_imag = rhs_pieces[0], rhs_pieces[1] flags = rhs_pieces[2:] id_f, fn = lhs_pieces[:2] @@ -108,7 +108,7 @@ (args[0][0], args[0][1], args[1][0], args[1][1]) else: return '(complex(%r, %r))' % (args[0], args[1]) - + def rAssertAlmostEqual(a, b, rel_err = 2e-15, abs_err = 5e-323, msg=''): """Fail if the two floating-point numbers are not almost equal. diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -61,7 +61,7 @@ # Things with a tuple return type have a fake impl for RPython, check # to see if the method has one. 
if hasattr(oo_math, method_name): - oofake = getattr(oo_math, method_name) + oofake = getattr(oo_math, method_name) register_external(getattr(module, name), arg_types, return_type, export_name='ll_math.%s' % method_name, sandboxsafe=True, diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -718,186 +718,186 @@ return None def lltype2ctypes(llobj, normalize=True): - """Convert the lltype object 'llobj' to its ctypes equivalent. - 'normalize' should only be False in tests, where we want to - inspect the resulting ctypes object manually. - """ - with rlock: - if isinstance(llobj, lltype._uninitialized): - return uninitialized2ctypes(llobj.TYPE) - if isinstance(llobj, llmemory.AddressAsInt): - cobj = ctypes.cast(lltype2ctypes(llobj.adr), ctypes.c_void_p) - res = intmask(cobj.value) - _int2obj[res] = llobj.adr.ptr._obj - return res - if isinstance(llobj, llmemory.fakeaddress): - llobj = llobj.ptr or 0 + """Convert the lltype object 'llobj' to its ctypes equivalent. + 'normalize' should only be False in tests, where we want to + inspect the resulting ctypes object manually. + """ + with rlock: + if isinstance(llobj, lltype._uninitialized): + return uninitialized2ctypes(llobj.TYPE) + if isinstance(llobj, llmemory.AddressAsInt): + cobj = ctypes.cast(lltype2ctypes(llobj.adr), ctypes.c_void_p) + res = intmask(cobj.value) + _int2obj[res] = llobj.adr.ptr._obj + return res + if isinstance(llobj, llmemory.fakeaddress): + llobj = llobj.ptr or 0 - T = lltype.typeOf(llobj) + T = lltype.typeOf(llobj) - if isinstance(T, lltype.Ptr): - if not llobj: # NULL pointer + if isinstance(T, lltype.Ptr): + if not llobj: # NULL pointer + if T == llmemory.GCREF: + return ctypes.c_void_p(0) + return get_ctypes_type(T)() + if T == llmemory.GCREF: - return ctypes.c_void_p(0) - return get_ctypes_type(T)() + if isinstance(llobj._obj, _llgcopaque): + return ctypes.c_void_p(llobj._obj.intval) + if isinstance(llobj._obj, int): # tagged pointer + return ctypes.c_void_p(llobj._obj) + container = llobj._obj.container + T = lltype.Ptr(lltype.typeOf(container)) + # otherwise it came from integer and we want a c_void_p with + # the same value + if getattr(container, 'llopaque', None): + try: + no = _opaque_objs_seen[container] + except KeyError: + no = len(_opaque_objs) + _opaque_objs.append(container) + _opaque_objs_seen[container] = no + return no * 2 + 1 + else: + container = llobj._obj + if isinstance(T.TO, lltype.FuncType): + # XXX a temporary workaround for comparison of lltype.FuncType + key = llobj._obj.__dict__.copy() + key['_TYPE'] = repr(key['_TYPE']) + items = key.items() + items.sort() + key = tuple(items) + if key in _all_callbacks: + return _all_callbacks[key] + v1voidlist = [(i, getattr(container, '_void' + str(i), None)) + for i in range(len(T.TO.ARGS)) + if T.TO.ARGS[i] is lltype.Void] + def callback_internal(*cargs): + cargs = list(cargs) + for v1 in v1voidlist: + cargs.insert(v1[0], v1[1]) + assert len(cargs) == len(T.TO.ARGS) + llargs = [] + for ARG, carg in zip(T.TO.ARGS, cargs): + if ARG is lltype.Void: + llargs.append(carg) + else: + llargs.append(ctypes2lltype(ARG, carg)) + if hasattr(container, 'graph'): + if LLInterpreter.current_interpreter is None: + raise AssertionError + llinterp = LLInterpreter.current_interpreter + try: + llres = llinterp.eval_graph(container.graph, llargs) + except LLException, lle: + llinterp._store_exception(lle) + return 0 + #except: + # 
import pdb + # pdb.set_trace() + else: + try: + llres = container._callable(*llargs) + except LLException, lle: + llinterp = LLInterpreter.current_interpreter + llinterp._store_exception(lle) + return 0 + assert lltype.typeOf(llres) == T.TO.RESULT + if T.TO.RESULT is lltype.Void: + return None + res = lltype2ctypes(llres) + if isinstance(T.TO.RESULT, lltype.Ptr): + _all_callbacks_results.append(res) + res = ctypes.cast(res, ctypes.c_void_p).value + if res is None: + return 0 + if T.TO.RESULT == lltype.SingleFloat: + res = res.value # baaaah, cannot return a c_float() + return res - if T == llmemory.GCREF: - if isinstance(llobj._obj, _llgcopaque): - return ctypes.c_void_p(llobj._obj.intval) - if isinstance(llobj._obj, int): # tagged pointer - return ctypes.c_void_p(llobj._obj) - container = llobj._obj.container - T = lltype.Ptr(lltype.typeOf(container)) - # otherwise it came from integer and we want a c_void_p with - # the same value - if getattr(container, 'llopaque', None): - try: - no = _opaque_objs_seen[container] - except KeyError: - no = len(_opaque_objs) - _opaque_objs.append(container) - _opaque_objs_seen[container] = no - return no * 2 + 1 - else: - container = llobj._obj - if isinstance(T.TO, lltype.FuncType): - # XXX a temporary workaround for comparison of lltype.FuncType - key = llobj._obj.__dict__.copy() - key['_TYPE'] = repr(key['_TYPE']) - items = key.items() - items.sort() - key = tuple(items) - if key in _all_callbacks: - return _all_callbacks[key] - v1voidlist = [(i, getattr(container, '_void' + str(i), None)) - for i in range(len(T.TO.ARGS)) - if T.TO.ARGS[i] is lltype.Void] - def callback_internal(*cargs): - cargs = list(cargs) - for v1 in v1voidlist: - cargs.insert(v1[0], v1[1]) - assert len(cargs) == len(T.TO.ARGS) - llargs = [] - for ARG, carg in zip(T.TO.ARGS, cargs): - if ARG is lltype.Void: - llargs.append(carg) - else: - llargs.append(ctypes2lltype(ARG, carg)) - if hasattr(container, 'graph'): - if LLInterpreter.current_interpreter is None: - raise AssertionError - llinterp = LLInterpreter.current_interpreter + def callback(*cargs): try: - llres = llinterp.eval_graph(container.graph, llargs) - except LLException, lle: - llinterp._store_exception(lle) - return 0 - #except: - # import pdb - # pdb.set_trace() + return callback_internal(*cargs) + except: + import sys + #if option.usepdb: + # import pdb; pdb.post_mortem(sys.exc_traceback) + global _callback_exc_info + _callback_exc_info = sys.exc_info() + raise + + if isinstance(T.TO.RESULT, lltype.Ptr): + TMod = lltype.Ptr(lltype.FuncType(T.TO.ARGS, + lltype.Signed)) + ctypes_func_type = get_ctypes_type(TMod) + res = ctypes_func_type(callback) + ctypes_func_type = get_ctypes_type(T) + res = ctypes.cast(res, ctypes_func_type) else: - try: - llres = container._callable(*llargs) - except LLException, lle: - llinterp = LLInterpreter.current_interpreter - llinterp._store_exception(lle) - return 0 - assert lltype.typeOf(llres) == T.TO.RESULT - if T.TO.RESULT is lltype.Void: - return None - res = lltype2ctypes(llres) - if isinstance(T.TO.RESULT, lltype.Ptr): - _all_callbacks_results.append(res) - res = ctypes.cast(res, ctypes.c_void_p).value - if res is None: - return 0 - if T.TO.RESULT == lltype.SingleFloat: - res = res.value # baaaah, cannot return a c_float() + ctypes_func_type = get_ctypes_type(T) + res = ctypes_func_type(callback) + _all_callbacks[key] = res + key2 = intmask(ctypes.cast(res, ctypes.c_void_p).value) + _int2obj[key2] = container return res - def callback(*cargs): - try: - return callback_internal(*cargs) 
- except: - import sys - #if option.usepdb: - # import pdb; pdb.post_mortem(sys.exc_traceback) - global _callback_exc_info - _callback_exc_info = sys.exc_info() - raise + index = 0 + if isinstance(container, lltype._subarray): + topmost, index = _find_parent(container) + container = topmost + T = lltype.Ptr(lltype.typeOf(container)) - if isinstance(T.TO.RESULT, lltype.Ptr): - TMod = lltype.Ptr(lltype.FuncType(T.TO.ARGS, - lltype.Signed)) - ctypes_func_type = get_ctypes_type(TMod) - res = ctypes_func_type(callback) - ctypes_func_type = get_ctypes_type(T) - res = ctypes.cast(res, ctypes_func_type) + if container._storage is None: + raise RuntimeError("attempting to pass a freed structure to C") + if container._storage is True: + # container has regular lltype storage, convert it to ctypes + if isinstance(T.TO, lltype.Struct): + convert_struct(container) + elif isinstance(T.TO, lltype.Array): + convert_array(container) + elif isinstance(T.TO, lltype.OpaqueType): + if T.TO != lltype.RuntimeTypeInfo: + cbuf = ctypes.create_string_buffer(T.TO.hints['getsize']()) + else: + cbuf = ctypes.create_string_buffer("\x00") + cbuf = ctypes.cast(cbuf, ctypes.c_void_p) + add_storage(container, _parentable_mixin, cbuf) + else: + raise NotImplementedError(T) + container._ctypes_storage_was_allocated() + + if isinstance(T.TO, lltype.OpaqueType): + return container._storage.value + + storage = container._storage + p = storage + if index: + p = ctypes.cast(p, ctypes.c_void_p) + p = ctypes.c_void_p(p.value + index) + c_tp = get_ctypes_type(T.TO) + storage.contents._normalized_ctype = c_tp + if normalize and hasattr(storage.contents, '_normalized_ctype'): + normalized_ctype = storage.contents._normalized_ctype + p = ctypes.cast(p, ctypes.POINTER(normalized_ctype)) + if lltype.typeOf(llobj) == llmemory.GCREF: + p = ctypes.cast(p, ctypes.c_void_p) + return p + + if isinstance(llobj, Symbolic): + if isinstance(llobj, llmemory.ItemOffset): + llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat + elif isinstance(llobj, ComputedIntSymbolic): + llobj = llobj.compute_fn() else: - ctypes_func_type = get_ctypes_type(T) - res = ctypes_func_type(callback) - _all_callbacks[key] = res - key2 = intmask(ctypes.cast(res, ctypes.c_void_p).value) - _int2obj[key2] = container - return res + raise NotImplementedError(llobj) # don't know about symbolic value - index = 0 - if isinstance(container, lltype._subarray): - topmost, index = _find_parent(container) - container = topmost - T = lltype.Ptr(lltype.typeOf(container)) + if T is lltype.Char or T is lltype.UniChar: + return ord(llobj) - if container._storage is None: - raise RuntimeError("attempting to pass a freed structure to C") - if container._storage is True: - # container has regular lltype storage, convert it to ctypes - if isinstance(T.TO, lltype.Struct): - convert_struct(container) - elif isinstance(T.TO, lltype.Array): - convert_array(container) - elif isinstance(T.TO, lltype.OpaqueType): - if T.TO != lltype.RuntimeTypeInfo: - cbuf = ctypes.create_string_buffer(T.TO.hints['getsize']()) - else: - cbuf = ctypes.create_string_buffer("\x00") - cbuf = ctypes.cast(cbuf, ctypes.c_void_p) - add_storage(container, _parentable_mixin, cbuf) - else: - raise NotImplementedError(T) - container._ctypes_storage_was_allocated() + if T is lltype.SingleFloat: + return ctypes.c_float(float(llobj)) - if isinstance(T.TO, lltype.OpaqueType): - return container._storage.value - - storage = container._storage - p = storage - if index: - p = ctypes.cast(p, ctypes.c_void_p) - p = 
ctypes.c_void_p(p.value + index) - c_tp = get_ctypes_type(T.TO) - storage.contents._normalized_ctype = c_tp - if normalize and hasattr(storage.contents, '_normalized_ctype'): - normalized_ctype = storage.contents._normalized_ctype - p = ctypes.cast(p, ctypes.POINTER(normalized_ctype)) - if lltype.typeOf(llobj) == llmemory.GCREF: - p = ctypes.cast(p, ctypes.c_void_p) - return p - - if isinstance(llobj, Symbolic): - if isinstance(llobj, llmemory.ItemOffset): - llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat - elif isinstance(llobj, ComputedIntSymbolic): - llobj = llobj.compute_fn() - else: - raise NotImplementedError(llobj) # don't know about symbolic value - - if T is lltype.Char or T is lltype.UniChar: - return ord(llobj) - - if T is lltype.SingleFloat: - return ctypes.c_float(float(llobj)) - - return llobj + return llobj def ctypes2lltype(T, cobj): """Convert the ctypes object 'cobj' to its lltype equivalent. diff --git a/rpython/rtyper/lltypesystem/test/test_lltype.py b/rpython/rtyper/lltypesystem/test/test_lltype.py --- a/rpython/rtyper/lltypesystem/test/test_lltype.py +++ b/rpython/rtyper/lltypesystem/test/test_lltype.py @@ -53,9 +53,9 @@ length = len(l.items) newitems = malloc(List_typ.items.TO, length+1) i = 0 - while i len(prefixes[match]): - match = i - i += 1 - cache[tag] = match, module - return match + cache = {} + annotated = annotator.annotated + def indx(block): + func = annotated[block] + module = func.__module__ + if module is None: + module = 'None' + tag = "%s:%s" % (module, func.__name__) + try: + return cache[tag] + except KeyError: + match = NOMATCH + i = 0 + for pfx in prefixes: + if tag.startswith(pfx): + if match == NOMATCH: + match = i + else: + if len(pfx) > len(prefixes[match]): + match = i + i += 1 + cache[tag] = match, module + return match - pending.sort(lambda blk1, blk2: cmp(indx(blk1), indx(blk2))) + pending.sort(lambda blk1, blk2: cmp(indx(blk1), indx(blk2))) - cur_module = ['$'] - def track(block): - module = annotated[block].__module__ - if module != cur_module[0]: - print "--- Specializing blocks in module: %s" % module - cur_module[0] = module - return track - + cur_module = ['$'] + def track(block): + module = annotated[block].__module__ + if module != cur_module[0]: + print "--- Specializing blocks in module: %s" % module + cur_module[0] = module + return track diff --git a/rpython/translator/test/snippet.py b/rpython/translator/test/snippet.py --- a/rpython/translator/test/snippet.py +++ b/rpython/translator/test/snippet.py @@ -62,7 +62,7 @@ def sieve_of_eratosthenes(): """Sieve of Eratosthenes - + This one is from an infamous benchmark, "The Great Computer Language Shootout". @@ -103,7 +103,7 @@ s = s + '!' 
return s -def poor_man_range(i=int): +def poor_man_range(i=int): lst = [] while i > 0: i = i - 1 @@ -111,14 +111,14 @@ lst.reverse() return lst -def poor_man_rev_range(i=int): +def poor_man_rev_range(i=int): lst = [] while i > 0: i = i - 1 lst += [i] return lst -def simple_id(x=anytype): +def simple_id(x=anytype): return x def branch_id(cond=anytype, a=anytype, b=anytype): @@ -257,7 +257,7 @@ else: return 1 -def _append_five(lst): +def _append_five(lst): lst += [5] def call_five(): @@ -265,7 +265,7 @@ _append_five(a) return a -def _append_six(lst): +def _append_six(lst): lst += [6] def call_five_six(): @@ -333,7 +333,7 @@ return r -# INHERITANCE / CLASS TESTS +# INHERITANCE / CLASS TESTS class C(object): pass def build_instance(): @@ -565,7 +565,7 @@ b = star_args1def(5) c = star_args1def() return a+b+c - + def star_args(x, y, *args): return x + args[0] @@ -605,10 +605,10 @@ This one is from a Philippine Pythonista Hangout, an modified version of Andy Sy's code. - + list.append is modified to list concatenation, and powerset is pre-allocated and stored, instead of printed. - + URL is: http://lists.free.net.ph/pipermail/python/2002-November/ """ set = range(setsize) @@ -643,8 +643,8 @@ l = [] v = l.append while n: - l[7] = 5 # raises an exception - break + l[7] = 5 # raises an exception + break return v def _getstuff(x): @@ -686,7 +686,7 @@ pass def mergefunctions(cond): - if cond: + if cond: x = func1 else: x = func2 @@ -703,48 +703,48 @@ a,b = 3, "hello" return a -class APBC: - def __init__(self): +class APBC: + def __init__(self): self.answer = 42 apbc = APBC() apbc.answer = 7 def preserve_pbc_attr_on_instance(cond): - if cond: + if cond: x = APBC() - else: - x = apbc - return x.answer + else: + x = apbc + return x.answer class APBCS(object): __slots__ = ['answer'] - def __init__(self): + def __init__(self): self.answer = 42 apbcs = APBCS() apbcs.answer = 7 def preserve_pbc_attr_on_instance_with_slots(cond): - if cond: + if cond: x = APBCS() - else: + else: x = apbcs - return x.answer + return x.answer -def is_and_knowntype(x): - if x is None: - return x - else: - return None +def is_and_knowntype(x): + if x is None: + return x + else: + return None -def isinstance_and_knowntype(x): - if isinstance(x, APBC): +def isinstance_and_knowntype(x): + if isinstance(x, APBC): return x - else: - return apbc + else: + return apbc def simple_slice(x): return x[:10] @@ -836,7 +836,7 @@ witness(e) return e return Exc() - + def slice_union(x): if x: return slice(1) @@ -938,8 +938,8 @@ class Thing2(long): - def __new__(t,v): - return long.__new__(t,v*2) + def __new__(t, v): + return long.__new__(t, v * 2) thing2 = Thing2(2) @@ -971,16 +971,16 @@ class R: - def __init__(self, n): - if n>0: - self.r = R(n-1) - else: - self.r = None - self.n = n - if self.r: - self.m = self.r.n - else: - self.m = -1 + def __init__(self, n): + if n > 0: + self.r = R(n-1) + else: + self.r = None + self.n = n + if self.r: + self.m = self.r.n + else: + self.m = -1 def make_r(n): return R(n) @@ -989,7 +989,7 @@ pass class Even(B): - def __init__(self,n): + def __init__(self, n): if n > 0: self.x = [Odd(n-1)] self.y = self.x[0].x @@ -998,12 +998,12 @@ self.y = [] class Odd(B): - def __init__(self,n): + def __init__(self, n): self.x = [Even(n-1)] self.y = self.x[0].x def make_eo(n): - if n%2 == 0: + if n % 2 == 0: return Even(n) else: return Odd(n) @@ -1013,26 +1013,26 @@ # instances rev numbers class Box: - pass + pass class Box2: - pass + pass class Box3(Box2): - pass + pass def flow_rev_numbers(n): - bx3 = Box3() - bx3.x = 1 - 
bx = Box() - bx.bx3 = bx3 - if n >0: - z = bx.bx3.x - if n >0: - bx2 = Box2() - bx2.x = 3 - return z - raise Exception + bx3 = Box3() + bx3.x = 1 + bx = Box() + bx.bx3 = bx3 + if n > 0: + z = bx.bx3.x + if n > 0: + bx2 = Box2() + bx2.x = 3 + return z + raise Exception # class specialization @@ -1077,13 +1077,13 @@ return ovfcheck((-maxint-1) // i) except (OverflowError, ZeroDivisionError): raise - + def mul_func(x=numtype, y=numtype): try: return ovfcheck(x * y) except OverflowError: raise - + def mod_func(i=numtype): try: return ovfcheck((-maxint-1) % i) @@ -1098,7 +1098,8 @@ except ValueError: raise -class hugelmugel(OverflowError):pass +class hugelmugel(OverflowError): + pass def hugo(a, b, c):pass @@ -1112,6 +1113,7 @@ def unary_func(i=numtype): try: return ovfcheck(-i), ovfcheck(abs(i-1)) - except: raise + except: + raise # XXX it would be nice to get it right without an exception # handler at all, but then we need to do much harder parsing diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -445,4 +445,4 @@ class TestOOSpecializeListComprehension(TestLLSpecializeListComprehension): - typesystem = 'ootype' + typesystem = 'ootype' diff --git a/rpython/translator/tool/make_dot.py b/rpython/translator/tool/make_dot.py --- a/rpython/translator/tool/make_dot.py +++ b/rpython/translator/tool/make_dot.py @@ -143,13 +143,13 @@ color = "black" fillcolor = getattr(block, "blockcolor", "white") if not numblocks: - shape = "box" - if len(block.inputargs) == 1: - lines[-1] += 'return %s' % tuple(block.inputargs) - fillcolor= RETURN_COLOR - elif len(block.inputargs) == 2: - lines[-1] += 'raise %s, %s' % tuple(block.inputargs) - fillcolor= EXCEPT_COLOR + shape = "box" + if len(block.inputargs) == 1: + lines[-1] += 'return %s' % tuple(block.inputargs) + fillcolor = RETURN_COLOR + elif len(block.inputargs) == 2: + lines[-1] += 'raise %s, %s' % tuple(block.inputargs) + fillcolor = EXCEPT_COLOR elif numblocks == 1: shape = "box" else: From noreply at buildbot.pypy.org Wed Jul 10 06:13:45 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 10 Jul 2013 06:13:45 +0200 (CEST) Subject: [pypy-commit] pypy default: More 4-space indentation fixes. Message-ID: <20130710041345.AE39F1C3364@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65309:9c5869afb49c Date: 2013-07-10 09:27 +1000 http://bitbucket.org/pypy/pypy/changeset/9c5869afb49c/ Log: More 4-space indentation fixes. diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -542,7 +542,7 @@ # TODO: get the capi-identify test selection right ... 
if self.capi_identity != 'CINT': # don't test anything for Reflex - return + return import cppyy diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -131,11 +131,11 @@ # TVectorF is a typedef of floats v = cppyy.gbl.TVectorF(N) for i in range(N): - v[i] = i*i + v[i] = i*i assert len(v) == N for j in v: - assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. + assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. class AppTestCINTTTREE: diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -97,9 +97,9 @@ assert hasattr(v, 'end' ) for i in range(self.N): - v.push_back(cppyy.gbl.just_a_class()) - v[i].m_i = i - assert v[i].m_i == i + v.push_back(cppyy.gbl.just_a_class()) + v[i].m_i = i + assert v[i].m_i == i assert len(v) == self.N v.destruct() @@ -332,7 +332,7 @@ a = std.list(int)() for arg in a: - pass + pass class AppTestSTLMAP: @@ -395,7 +395,7 @@ m = std.map(int, int)() for key, value in m: - pass + pass def test04_unsignedvalue_typemap_types(self): """Test assignability of maps with unsigned value types""" diff --git a/pypy/module/oracle/interp_pool.py b/pypy/module/oracle/interp_pool.py --- a/pypy/module/oracle/interp_pool.py +++ b/pypy/module/oracle/interp_pool.py @@ -130,14 +130,14 @@ self.checkConnected(space) if __args__.keywords: - keywords = __args__.keywords + ["pool"] + keywords = __args__.keywords + ["pool"] else: - keywords = ["pool"] + keywords = ["pool"] if __args__.keywords_w: - keywords_w = __args__.keywords_w + [space.wrap(self)] + keywords_w = __args__.keywords_w + [space.wrap(self)] else: - keywords_w = [space.wrap(self)] - + keywords_w = [space.wrap(self)] + newargs = Arguments(space, __args__.arguments_w, keywords, diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -75,7 +75,7 @@ info = pyparse.CompileInfo("", mode) parser = pyparse.PythonParser(space) try: - tree = parser.parse_source(source, info) + tree = parser.parse_source(source, info) except error.IndentationError, e: raise OperationError(space.w_IndentationError, e.wrap_info(space)) diff --git a/pypy/module/pyexpat/test/test_build.py b/pypy/module/pyexpat/test/test_build.py --- a/pypy/module/pyexpat/test/test_build.py +++ b/pypy/module/pyexpat/test/test_build.py @@ -12,7 +12,7 @@ py.test.skip("No module expat") try: - from pypy.module.pyexpat import interp_pyexpat + from pypy.module.pyexpat import interp_pyexpat except (ImportError, CompilationError): py.test.skip("Expat not installed") diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -63,7 +63,7 @@ offset = {} for i, op in enumerate(oplist): if i != 1: - offset[op] = i + offset[op] = i token = JitCellToken() token.number = 0 diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -91,13 +91,13 @@ next(cur) def test_cursor_after_close(con): - cur = con.execute('select 1') - cur.close() - con.close() - pytest.raises(_sqlite3.ProgrammingError, "cur.close()") - # raises ProgrammingError because should check closed before check 
args - pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") - pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") + cur = con.execute('select 1') + cur.close() + con.close() + pytest.raises(_sqlite3.ProgrammingError, "cur.close()") + # raises ProgrammingError because should check closed before check args + pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") + pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") @pytest.mark.skipif("not hasattr(sys, 'pypy_translation_info')") def test_connection_del(tmpdir): diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -24,150 +24,150 @@ """) def test_mix_classes(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - class B(object): - def f(self): - return 43 - class C(object): - def f(self): - return 44 - l = [A(), B(), C()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + i % 3 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 15 - assert cache_counter[1] >= 3 # should be (27, 3) - assert sum(cache_counter) == 30 + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + class B(object): + def f(self): + return 43 + class C(object): + def f(self): + return 44 + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + i % 3 + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 15 + assert cache_counter[1] >= 3 # should be (27, 3) + assert sum(cache_counter) == 30 def test_class_that_cannot_be_cached(self): - @self.retry - def run(): - import __pypy__ - class X: - pass - class Y(object): - pass - class A(Y, X): - def f(self): - return 42 + @self.retry + def run(): + import __pypy__ + class X: + pass + class Y(object): + pass + class A(Y, X): + def f(self): + return 42 - class B(object): - def f(self): - return 43 - class C(object): - def f(self): - return 44 - l = [A(), B(), C()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + i % 3 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 9 - assert cache_counter[1] >= 2 # should be (18, 2) - assert sum(cache_counter) == 20 - + class B(object): + def f(self): + return 43 + class C(object): + def f(self): + return 44 + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + i % 3 + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 9 + assert cache_counter[1] >= 2 # should be (18, 2) + assert sum(cache_counter) == 20 + def test_change_methods(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - l = [A()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + i - A.f = eval("lambda self: %s" % (42 + i + 1, )) - cache_counter = __pypy__.method_cache_counter("f") - # - # a bit of explanation about what's going on. (1) is the line "a.f()" - # and (2) is "A.f = ...". - # - # at line (1) we do the lookup on type(a).f - # - # at line (2) we do a setattr on A. However, descr_setattr does also a - # lookup of type(A).f i.e. type.f, to check if by chance 'f' is a data - # descriptor. 
- # - # At the first iteration: - # (1) is a miss because it's the first lookup of A.f. The result is cached - # - # (2) is a miss because it is the first lookup of type.f. The - # (non-existant) result is cached. The version of A changes, and 'f' - # is changed to be a cell object, so that subsequest assignments won't - # change the version of A - # - # At the second iteration: - # (1) is a miss because the version of A changed just before - # (2) is a hit, because type.f is cached. The version of A no longer changes - # - # At the third and subsequent iterations: - # (1) is a hit, because the version of A did not change - # (2) is a hit, see above - assert cache_counter == (17, 3) + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + l = [A()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + i + A.f = eval("lambda self: %s" % (42 + i + 1, )) + cache_counter = __pypy__.method_cache_counter("f") + # + # a bit of explanation about what's going on. (1) is the line "a.f()" + # and (2) is "A.f = ...". + # + # at line (1) we do the lookup on type(a).f + # + # at line (2) we do a setattr on A. However, descr_setattr does also a + # lookup of type(A).f i.e. type.f, to check if by chance 'f' is a data + # descriptor. + # + # At the first iteration: + # (1) is a miss because it's the first lookup of A.f. The result is cached + # + # (2) is a miss because it is the first lookup of type.f. The + # (non-existant) result is cached. The version of A changes, and 'f' + # is changed to be a cell object, so that subsequest assignments won't + # change the version of A + # + # At the second iteration: + # (1) is a miss because the version of A changed just before + # (2) is a hit, because type.f is cached. 
The version of A no longer changes + # + # At the third and subsequent iterations: + # (1) is a hit, because the version of A did not change + # (2) is a hit, see above + assert cache_counter == (17, 3) def test_subclasses(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - class B(object): - def f(self): - return 43 - class C(A): - pass - l = [A(), B(), C()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + (i % 3 == 1) - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 15 - assert cache_counter[1] >= 3 # should be (27, 3) - assert sum(cache_counter) == 30 - + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + class B(object): + def f(self): + return 43 + class C(A): + pass + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + (i % 3 == 1) + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 15 + assert cache_counter[1] >= 3 # should be (27, 3) + assert sum(cache_counter) == 30 + def test_many_names(self): - @self.retry - def run(): - import __pypy__ - for j in range(20): - class A(object): - foo = 5 - bar = 6 - baz = 7 - xyz = 8 - stuff = 9 - a = 10 - foobar = 11 + @self.retry + def run(): + import __pypy__ + for j in range(20): + class A(object): + foo = 5 + bar = 6 + baz = 7 + xyz = 8 + stuff = 9 + a = 10 + foobar = 11 - a = A() - names = [name for name in A.__dict__.keys() - if not name.startswith('_')] - names.sort() - names_repeated = names * 10 - result = [] - __pypy__.reset_method_cache_counter() - for name in names_repeated: - result.append(getattr(a, name)) - append_counter = __pypy__.method_cache_counter("append") - names_counters = [__pypy__.method_cache_counter(name) - for name in names] - try: - assert append_counter[0] >= 10 * len(names) - 1 - for name, count in zip(names, names_counters): - assert count == (9, 1), str((name, count)) - break - except AssertionError: - pass - else: - raise + a = A() + names = [name for name in A.__dict__.keys() + if not name.startswith('_')] + names.sort() + names_repeated = names * 10 + result = [] + __pypy__.reset_method_cache_counter() + for name in names_repeated: + result.append(getattr(a, name)) + append_counter = __pypy__.method_cache_counter("append") + names_counters = [__pypy__.method_cache_counter(name) + for name in names] + try: + assert append_counter[0] >= 10 * len(names) - 1 + for name, count in zip(names, names_counters): + assert count == (9, 1), str((name, count)) + break + except AssertionError: + pass + else: + raise def test_mutating_bases(self): class C(object): @@ -189,50 +189,50 @@ assert e.foo == 3 def test_custom_metaclass(self): - @self.retry - def run(): - import __pypy__ - for j in range(20): - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = __pypy__.method_cache_counter("f") - assert sum(cache_counter) == 10 - if cache_counter == (9, 1): - break - #else the moon is misaligned, try again - else: - raise AssertionError("cache_counter = %r" % (cache_counter,)) + @self.retry + def run(): + import __pypy__ + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A 
= type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, "__new__")(A)] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): - @self.retry - def run(): - import __pypy__ - class A(object): - x = 1 - y = 2 - __pypy__.reset_method_cache_counter() - a = A() - for i in range(100): - assert a.y == 2 - assert a.x == i + 1 - A.x += 1 - cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] >= 350 - assert cache_counter[1] >= 1 - assert sum(cache_counter) == 400 + @self.retry + def run(): + import __pypy__ + class A(object): + x = 1 + y = 2 + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + assert a.x == i + 1 + A.x += 1 + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] >= 350 + assert cache_counter[1] >= 1 + assert sum(cache_counter) == 400 - __pypy__.reset_method_cache_counter() - a = A() - for i in range(100): - assert a.y == 2 - setattr(a, "a%s" % i, i) - cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] == 0 # 0 hits, because all the attributes are new + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + setattr(a, "a%s" % i, i) + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] == 0 # 0 hits, because all the attributes are new diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -378,11 +378,11 @@ if left: while lpos < rpos and _isspace(u_self[lpos]): - lpos += 1 + lpos += 1 if right: while rpos > lpos and _isspace(u_self[rpos - 1]): - rpos -= 1 + rpos -= 1 assert rpos >= 0 result = u_self[lpos: rpos] diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -318,12 +318,13 @@ raises(TypeError, operate, A()) def test_missing_getattribute(self): - class X(object): pass + class X(object): + pass class Y(X): - class __metaclass__(type): - def mro(cls): - return [cls, X] + class __metaclass__(type): + def mro(cls): + return [cls, X] x = X() x.__class__ = Y @@ -331,8 +332,10 @@ def test_silly_but_consistent_order(self): # incomparable objects sort by type name :-/ - class A(object): pass - class zz(object): pass + class A(object): + pass + class zz(object): + pass assert A() < zz() assert zz() > A() # if in doubt, CPython sorts numbers before non-numbers diff --git a/pypy/tool/pytest/result.py b/pypy/tool/pytest/result.py --- a/pypy/tool/pytest/result.py +++ b/pypy/tool/pytest/result.py @@ -1,53 +1,53 @@ -import sys +import sys import py import re -class Result(object): - def __init__(self, init=True): +class Result(object): + def __init__(self, init=True): self._headers = {} self._blocks = {} self._blocknames = [] - if init: - stdinit(self) + if init: + stdinit(self) - def __setitem__(self, name, value): - self._headers[name.lower()] = value + def __setitem__(self, name, value): + self._headers[name.lower()] = value - def __getitem__(self, name): + def __getitem__(self, name): return self._headers[name.lower()] - def get(self, name, 
default): - return self._headers.get(name, default) - - def __delitem__(self, name): + def get(self, name, default): + return self._headers.get(name, default) + + def __delitem__(self, name): del self._headers[name.lower()] - def items(self): + def items(self): return self._headers.items() - def addnamedtext(self, name, text): + def addnamedtext(self, name, text): assert isinstance(text, basestring) assert isinstance(name, str) - self._blocknames.append(name) - self._blocks[name] = text + self._blocknames.append(name) + self._blocks[name] = text - def getnamedtext(self, name): + def getnamedtext(self, name): return self._blocks[name] - def repr_short_error(self): - if not self.isok(): - if 'reportdiff' in self._blocks: + def repr_short_error(self): + if not self.isok(): + if 'reportdiff' in self._blocks: return "output comparison failed, see reportdiff" - else: - text = self.getnamedtext('stderr') + else: + text = self.getnamedtext('stderr') lines = text.strip().split('\n') - if lines: + if lines: return lines[-1] - def repr_mimemessage(self): - from email.MIMEMultipart import MIMEMultipart + def repr_mimemessage(self): + from email.MIMEMultipart import MIMEMultipart from email.MIMEText import MIMEText - + outer = MIMEMultipart() items = self._headers.items() items.sort() @@ -56,31 +56,31 @@ assert ':' not in name chars = map(ord, name) assert min(chars) >= 33 and max(chars) <= 126 - outer[name] = str(value) - if not isinstance(value, str): - typename = type(value).__name__ + outer[name] = str(value) + if not isinstance(value, str): + typename = type(value).__name__ assert typename in vars(py.std.__builtin__) - reprs[name] = typename + reprs[name] = typename - outer['_reprs'] = repr(reprs) - - for name in self._blocknames: + outer['_reprs'] = repr(reprs) + + for name in self._blocknames: text = self._blocks[name] m = MIMEText(text) m.add_header('Content-Disposition', 'attachment', filename=name) - outer.attach(m) - return outer + outer.attach(m) + return outer def grep_nr(self,text,section='stdout'): stdout = self._blocks[section] find = re.search('%s(?P<nr>\d+)'%text,stdout) - if find: + if find: return float(find.group('nr')) - return 0. + return 0. def ratio_of_passed(self): if self.isok(): - return 1. + return 1. elif self.istimeout(): return 0.
else: @@ -88,16 +88,16 @@ if nr > 0: return (nr - (self.grep_nr('errors=') + self.grep_nr('failures=')))/nr else: - passed = self.grep_nr('TestFailed: ',section='stderr') - run = self.grep_nr('TestFailed: \d+/',section='stderr') - if run > 0: - return passed/run - else: - run = self.grep_nr('TestFailed: \d+ of ',section='stderr') - if run > 0 : - return (run-passed)/run - else: - return 0.0 + passed = self.grep_nr('TestFailed: ',section='stderr') + run = self.grep_nr('TestFailed: \d+/',section='stderr') + if run > 0: + return passed/run + else: + run = self.grep_nr('TestFailed: \d+ of ',section='stderr') + if run > 0 : + return (run-passed)/run + else: + return 0.0 def isok(self): return self['outcome'].lower() == 'ok' @@ -105,7 +105,7 @@ def iserror(self): return self['outcome'].lower()[:3] == 'err' or self['outcome'].lower() == 'fail' - def istimeout(self): + def istimeout(self): return self['outcome'].lower() == 't/o' # XXX backward compatibility @@ -114,7 +114,7 @@ return msg f = open(str(path), 'r') msg = f.read() - f.close() + f.close() for broken in ('exit status', 'cpu model', 'cpu mhz'): valid = broken.replace(' ','-') invalid = msg.find(broken+':') @@ -127,10 +127,10 @@ def sanitize_reprs(reprs): if 'exit status' in reprs: reprs['exit-status'] = reprs.pop('exit status') - -class ResultFromMime(Result): - def __init__(self, path): - super(ResultFromMime, self).__init__(init=False) + +class ResultFromMime(Result): + def __init__(self, path): + super(ResultFromMime, self).__init__(init=False) f = open(str(path), 'r') from email import message_from_file msg = message_from_file(f) @@ -142,48 +142,48 @@ self._reprs = eval(msg['_reprs']) del msg['_reprs'] sanitize_reprs(self._reprs) - for name, value in msg.items(): - if name in self._reprs: + for name, value in msg.items(): + if name in self._reprs: value = eval(value) # XXX security - self._headers[name] = value + self._headers[name] = value self.fspath = self['fspath'] - if self['platform'] == 'win32' and '\\' in self.fspath: + if self['platform'] == 'win32' and '\\' in self.fspath: self.testname = self.fspath.split('\\')[-1] - else: + else: self.testname = self.fspath.split('/')[-1] - #if sys.platform != 'win32' and '\\' in self.fspath: + #if sys.platform != 'win32' and '\\' in self.fspath: # self.fspath = py.path.local(self['fspath'].replace('\\' - self.path = path - - payload = msg.get_payload() - if payload: - for submsg in payload: + self.path = path + + payload = msg.get_payload() + if payload: + for submsg in payload: assert submsg.get_content_type() == 'text/plain' - fn = submsg.get_filename() + fn = submsg.get_filename() assert fn # XXX we need to deal better with encodings to # begin with content = submsg.get_payload() - for candidate in 'utf8', 'latin1': + for candidate in 'utf8', 'latin1': try: text = unicode(content, candidate) - except UnicodeDecodeError: + except UnicodeDecodeError: continue else: - unicode(content, candidate) - self.addnamedtext(fn, text) + unicode(content, candidate) + self.addnamedtext(fn, text) - def ismodifiedtest(self): - # XXX we need proper cross-platform paths! + def ismodifiedtest(self): + # XXX we need proper cross-platform paths! 
return 'modified' in self.fspath - def __repr__(self): - return '<%s (%s) %r rev=%s>' %(self.__class__.__name__, - self['outcome'], - self.fspath, + def __repr__(self): + return '<%s (%s) %r rev=%s>' %(self.__class__.__name__, + self['outcome'], + self.fspath, self['pypy-revision']) -def stdinit(result): +def stdinit(result): import getpass import socket try: @@ -192,24 +192,24 @@ username = 'unknown' userhost = '%s@%s' % (username, socket.gethostname()) result['testreport-version'] = "1.1.1" - result['userhost'] = userhost - result['platform'] = sys.platform - result['python-version-info'] = sys.version_info - info = try_getcpuinfo() + result['userhost'] = userhost + result['platform'] = sys.platform + result['python-version-info'] = sys.version_info + info = try_getcpuinfo() if info is not None: result['cpu-model'] = info.get('model name', "unknown") result['cpu-mhz'] = info.get('cpu mhz', 'unknown') # # # -def try_getcpuinfo(): - if sys.platform.startswith('linux'): +def try_getcpuinfo(): + if sys.platform.startswith('linux'): cpuinfopath = py.path.local('/proc/cpuinfo') - if cpuinfopath.check(file=1): + if cpuinfopath.check(file=1): d = {} - for line in cpuinfopath.readlines(): - if line.strip(): - name, value = line.split(':', 1) - name = name.strip().lower() - d[name] = value.strip() - return d + for line in cpuinfopath.readlines(): + if line.strip(): + name, value = line.split(':', 1) + name = name.strip().lower() + d[name] = value.strip() + return d diff --git a/pypy/tool/slaveproc.py b/pypy/tool/slaveproc.py --- a/pypy/tool/slaveproc.py +++ b/pypy/tool/slaveproc.py @@ -39,7 +39,7 @@ class SlaveProcess(object): _broken = False - + def __init__(self, slave_impl): if sys.platform == 'win32': unbuffered = '' @@ -58,7 +58,7 @@ def close(self): if not self._broken: - assert self.cmd(None) == 'done' + assert self.cmd(None) == 'done' self.exchg.forceclose() class Slave(object): @@ -70,7 +70,7 @@ exchg = Exchange(sys.stdin, sys.stdout) while True: try: - cmd = exchg.recv() + cmd = exchg.recv() except EOFError: # master died break if cmd is None: @@ -78,4 +78,3 @@ break result = self.do_cmd(cmd) exchg.send(result) - diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -900,123 +900,123 @@ return llobj def ctypes2lltype(T, cobj): - """Convert the ctypes object 'cobj' to its lltype equivalent. - 'T' is the expected lltype type. - """ - with rlock: - if T is lltype.Void: - return None - if isinstance(T, lltype.Typedef): - T = T.OF - if isinstance(T, lltype.Ptr): - ptrval = ctypes.cast(cobj, ctypes.c_void_p).value - if not cobj or not ptrval: # NULL pointer - # CFunctionType.__nonzero__ is broken before Python 2.6 - return lltype.nullptr(T.TO) - if isinstance(T.TO, lltype.Struct): - if T.TO._gckind == 'gc' and ptrval & 1: # a tagged pointer - gcref = _opaque_objs[ptrval // 2].hide() - return lltype.cast_opaque_ptr(T, gcref) - REAL_TYPE = T.TO - if T.TO._arrayfld is not None: - carray = getattr(cobj.contents, T.TO._arrayfld) - container = lltype._struct(T.TO, carray.length) + """Convert the ctypes object 'cobj' to its lltype equivalent. + 'T' is the expected lltype type. 
+ """ + with rlock: + if T is lltype.Void: + return None + if isinstance(T, lltype.Typedef): + T = T.OF + if isinstance(T, lltype.Ptr): + ptrval = ctypes.cast(cobj, ctypes.c_void_p).value + if not cobj or not ptrval: # NULL pointer + # CFunctionType.__nonzero__ is broken before Python 2.6 + return lltype.nullptr(T.TO) + if isinstance(T.TO, lltype.Struct): + if T.TO._gckind == 'gc' and ptrval & 1: # a tagged pointer + gcref = _opaque_objs[ptrval // 2].hide() + return lltype.cast_opaque_ptr(T, gcref) + REAL_TYPE = T.TO + if T.TO._arrayfld is not None: + carray = getattr(cobj.contents, T.TO._arrayfld) + container = lltype._struct(T.TO, carray.length) + else: + # special treatment of 'OBJECT' subclasses + if get_rtyper() and lltype._castdepth(REAL_TYPE, OBJECT) >= 0: + # figure out the real type of the object + containerheader = lltype._struct(OBJECT) + cobjheader = ctypes.cast(cobj, + get_ctypes_type(lltype.Ptr(OBJECT))) + struct_use_ctypes_storage(containerheader, + cobjheader) + REAL_TYPE = get_rtyper().get_type_for_typeptr( + containerheader.typeptr) + REAL_T = lltype.Ptr(REAL_TYPE) + cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T)) + container = lltype._struct(REAL_TYPE) + struct_use_ctypes_storage(container, cobj) + if REAL_TYPE != T.TO: + p = container._as_ptr() + container = lltype.cast_pointer(T, p)._as_obj() + # special treatment of 'OBJECT_VTABLE' subclasses + if get_rtyper() and lltype._castdepth(REAL_TYPE, + OBJECT_VTABLE) >= 0: + # figure out the real object that this vtable points to, + # and just return that + p = get_rtyper().get_real_typeptr_for_typeptr( + container._as_ptr()) + container = lltype.cast_pointer(T, p)._as_obj() + elif isinstance(T.TO, lltype.Array): + if T.TO._hints.get('nolength', False): + container = _array_of_unknown_length(T.TO) + container._storage = type(cobj)(cobj.contents) + else: + container = _array_of_known_length(T.TO) + container._storage = type(cobj)(cobj.contents) + elif isinstance(T.TO, lltype.FuncType): + cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) + if cobjkey in _int2obj: + container = _int2obj[cobjkey] + else: + _callable = get_ctypes_trampoline(T.TO, cobj) + return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'), + _callable=_callable) + elif isinstance(T.TO, lltype.OpaqueType): + if T == llmemory.GCREF: + container = _llgcopaque(cobj) + else: + container = lltype._opaque(T.TO) + cbuf = ctypes.cast(cobj, ctypes.c_void_p) + add_storage(container, _parentable_mixin, cbuf) else: - # special treatment of 'OBJECT' subclasses - if get_rtyper() and lltype._castdepth(REAL_TYPE, OBJECT) >= 0: - # figure out the real type of the object - containerheader = lltype._struct(OBJECT) - cobjheader = ctypes.cast(cobj, - get_ctypes_type(lltype.Ptr(OBJECT))) - struct_use_ctypes_storage(containerheader, - cobjheader) - REAL_TYPE = get_rtyper().get_type_for_typeptr( - containerheader.typeptr) - REAL_T = lltype.Ptr(REAL_TYPE) - cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T)) - container = lltype._struct(REAL_TYPE) - struct_use_ctypes_storage(container, cobj) - if REAL_TYPE != T.TO: - p = container._as_ptr() - container = lltype.cast_pointer(T, p)._as_obj() - # special treatment of 'OBJECT_VTABLE' subclasses - if get_rtyper() and lltype._castdepth(REAL_TYPE, - OBJECT_VTABLE) >= 0: - # figure out the real object that this vtable points to, - # and just return that - p = get_rtyper().get_real_typeptr_for_typeptr( - container._as_ptr()) - container = lltype.cast_pointer(T, p)._as_obj() - elif isinstance(T.TO, lltype.Array): - if 
T.TO._hints.get('nolength', False): - container = _array_of_unknown_length(T.TO) - container._storage = type(cobj)(cobj.contents) + raise NotImplementedError(T) + llobj = lltype._ptr(T, container, solid=True) + elif T is llmemory.Address: + if cobj is None: + llobj = llmemory.NULL else: - container = _array_of_known_length(T.TO) - container._storage = type(cobj)(cobj.contents) - elif isinstance(T.TO, lltype.FuncType): - cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value) - if cobjkey in _int2obj: - container = _int2obj[cobjkey] - else: - _callable = get_ctypes_trampoline(T.TO, cobj) - return lltype.functionptr(T.TO, getattr(cobj, '__name__', '?'), - _callable=_callable) - elif isinstance(T.TO, lltype.OpaqueType): - if T == llmemory.GCREF: - container = _llgcopaque(cobj) - else: - container = lltype._opaque(T.TO) - cbuf = ctypes.cast(cobj, ctypes.c_void_p) - add_storage(container, _parentable_mixin, cbuf) - else: - raise NotImplementedError(T) - llobj = lltype._ptr(T, container, solid=True) - elif T is llmemory.Address: - if cobj is None: - llobj = llmemory.NULL - else: - llobj = _lladdress(cobj) - elif T is lltype.Char: - llobj = chr(cobj) - elif T is lltype.UniChar: - try: - llobj = unichr(cobj) - except (ValueError, OverflowError): - for tc in 'HIL': - if array(tc).itemsize == array('u').itemsize: - import struct - cobj &= 256 ** struct.calcsize(tc) - 1 - llobj = array('u', array(tc, (cobj,)).tostring())[0] - break - else: - raise - elif T is lltype.Signed: - llobj = cobj - elif T is lltype.Bool: - assert cobj == True or cobj == False # 0 and 1 work too - llobj = bool(cobj) - elif T is lltype.SingleFloat: - if isinstance(cobj, ctypes.c_float): - cobj = cobj.value - llobj = r_singlefloat(cobj) - elif T is lltype.LongFloat: - if isinstance(cobj, ctypes.c_longdouble): - cobj = cobj.value - llobj = r_longfloat(cobj) - elif T is lltype.Void: - llobj = cobj - else: - from rpython.rtyper.lltypesystem import rffi - try: - inttype = rffi.platform.numbertype_to_rclass[T] - except KeyError: + llobj = _lladdress(cobj) + elif T is lltype.Char: + llobj = chr(cobj) + elif T is lltype.UniChar: + try: + llobj = unichr(cobj) + except (ValueError, OverflowError): + for tc in 'HIL': + if array(tc).itemsize == array('u').itemsize: + import struct + cobj &= 256 ** struct.calcsize(tc) - 1 + llobj = array('u', array(tc, (cobj,)).tostring())[0] + break + else: + raise + elif T is lltype.Signed: + llobj = cobj + elif T is lltype.Bool: + assert cobj == True or cobj == False # 0 and 1 work too + llobj = bool(cobj) + elif T is lltype.SingleFloat: + if isinstance(cobj, ctypes.c_float): + cobj = cobj.value + llobj = r_singlefloat(cobj) + elif T is lltype.LongFloat: + if isinstance(cobj, ctypes.c_longdouble): + cobj = cobj.value + llobj = r_longfloat(cobj) + elif T is lltype.Void: llobj = cobj else: - llobj = inttype(cobj) + from rpython.rtyper.lltypesystem import rffi + try: + inttype = rffi.platform.numbertype_to_rclass[T] + except KeyError: + llobj = cobj + else: + llobj = inttype(cobj) - assert lltype.typeOf(llobj) == T - return llobj + assert lltype.typeOf(llobj) == T + return llobj def uninitialized2ctypes(T): "For debugging, create a ctypes object filled with 0xDD." 
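The hunk above and the force_cast hunk that follows are almost entirely re-indentation: the existing conversion bodies are moved under a "with rlock:" block so that each lltype/ctypes conversion runs while holding a re-entrant lock. Below is a minimal sketch of that pattern, assuming a plain threading.RLock() as a stand-in for the module's own rlock (whose construction is not shown in this diff); the conversion body is a placeholder, not the real dispatch on T:

    import threading

    rlock = threading.RLock()  # stand-in; ll2ctypes.py defines its own rlock

    def convert_sketch(T, cobj):
        # the whole conversion runs while holding the lock, mirroring the
        # re-indented ctypes2lltype() and force_cast() in this changeset
        with rlock:
            if T is None:       # placeholder for the lltype.Void fast path
                return None
            return cobj         # the real code dispatches on the kind of T

    # hypothetical usage: convert_sketch(None, some_ctypes_object) returns None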
@@ -1244,52 +1244,52 @@ def force_cast(RESTYPE, value): - with rlock: - if not isinstance(RESTYPE, lltype.LowLevelType): - raise TypeError("rffi.cast() first arg should be a TYPE") - if isinstance(value, llmemory.AddressAsInt): - value = value.adr - if isinstance(value, llmemory.fakeaddress): - value = value.ptr or 0 - if isinstance(value, r_singlefloat): - value = float(value) - TYPE1 = lltype.typeOf(value) - cvalue = lltype2ctypes(value) - cresulttype = get_ctypes_type(RESTYPE) - if RESTYPE == TYPE1: - return value - elif isinstance(TYPE1, lltype.Ptr): + with rlock: + if not isinstance(RESTYPE, lltype.LowLevelType): + raise TypeError("rffi.cast() first arg should be a TYPE") + if isinstance(value, llmemory.AddressAsInt): + value = value.adr + if isinstance(value, llmemory.fakeaddress): + value = value.ptr or 0 + if isinstance(value, r_singlefloat): + value = float(value) + TYPE1 = lltype.typeOf(value) + cvalue = lltype2ctypes(value) + cresulttype = get_ctypes_type(RESTYPE) + if RESTYPE == TYPE1: + return value + elif isinstance(TYPE1, lltype.Ptr): + if isinstance(RESTYPE, lltype.Ptr): + # shortcut: ptr->ptr cast + cptr = ctypes.cast(cvalue, cresulttype) + return ctypes2lltype(RESTYPE, cptr) + # first cast the input pointer to an integer + cvalue = ctypes.cast(cvalue, ctypes.c_void_p).value + if cvalue is None: + cvalue = 0 + elif isinstance(cvalue, (str, unicode)): + cvalue = ord(cvalue) # character -> integer + elif hasattr(RESTYPE, "_type") and issubclass(RESTYPE._type, base_int): + cvalue = int(cvalue) + elif isinstance(cvalue, r_longfloat): + cvalue = cvalue.value + + if not isinstance(cvalue, (int, long, float)): + raise NotImplementedError("casting %r to %r" % (TYPE1, RESTYPE)) + if isinstance(RESTYPE, lltype.Ptr): - # shortcut: ptr->ptr cast - cptr = ctypes.cast(cvalue, cresulttype) - return ctypes2lltype(RESTYPE, cptr) - # first cast the input pointer to an integer - cvalue = ctypes.cast(cvalue, ctypes.c_void_p).value - if cvalue is None: - cvalue = 0 - elif isinstance(cvalue, (str, unicode)): - cvalue = ord(cvalue) # character -> integer - elif hasattr(RESTYPE, "_type") and issubclass(RESTYPE._type, base_int): - cvalue = int(cvalue) - elif isinstance(cvalue, r_longfloat): - cvalue = cvalue.value - - if not isinstance(cvalue, (int, long, float)): - raise NotImplementedError("casting %r to %r" % (TYPE1, RESTYPE)) - - if isinstance(RESTYPE, lltype.Ptr): - # upgrade to a more recent ctypes (e.g. 1.0.2) if you get - # an OverflowError on the following line. - cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype) - elif RESTYPE == lltype.Bool: - cvalue = bool(cvalue) - else: - try: - cvalue = cresulttype(cvalue).value # mask high bits off if needed - except TypeError: - cvalue = int(cvalue) # float -> int - cvalue = cresulttype(cvalue).value # try again - return ctypes2lltype(RESTYPE, cvalue) + # upgrade to a more recent ctypes (e.g. 1.0.2) if you get + # an OverflowError on the following line. 
+ cvalue = ctypes.cast(ctypes.c_void_p(cvalue), cresulttype) + elif RESTYPE == lltype.Bool: + cvalue = bool(cvalue) + else: + try: + cvalue = cresulttype(cvalue).value # mask high bits off if needed + except TypeError: + cvalue = int(cvalue) # float -> int + cvalue = cresulttype(cvalue).value # try again + return ctypes2lltype(RESTYPE, cvalue) class ForceCastEntry(ExtRegistryEntry): _about_ = force_cast diff --git a/rpython/rtyper/lltypesystem/test/test_lltype.py b/rpython/rtyper/lltypesystem/test/test_lltype.py --- a/rpython/rtyper/lltypesystem/test/test_lltype.py +++ b/rpython/rtyper/lltypesystem/test/test_lltype.py @@ -580,9 +580,9 @@ (Unsigned, u"x", ord(u'x')), ] for TGT, orig_val, expect in cases: - res = cast_primitive(TGT, orig_val) - assert typeOf(res) == TGT - assert res == expect + res = cast_primitive(TGT, orig_val) + assert typeOf(res) == TGT + assert res == expect res = cast_primitive(SingleFloat, 2.1) assert isinstance(res, r_singlefloat) assert float(res) == float(r_singlefloat(2.1)) diff --git a/rpython/translator/goal/order.py b/rpython/translator/goal/order.py --- a/rpython/translator/goal/order.py +++ b/rpython/translator/goal/order.py @@ -9,10 +9,10 @@ lst = open(module_list, 'r') try: - print "reading module-list: %s" % module_list - prefixes = lst.readlines() + print "reading module-list: %s" % module_list + prefixes = lst.readlines() finally: - lst.close() + lst.close() prefixes = [line.strip() for line in prefixes] prefixes = [line for line in prefixes if line and not line.startswith('#')] From noreply at buildbot.pypy.org Wed Jul 10 06:13:47 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 10 Jul 2013 06:13:47 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130710041347.15C541C3364@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65310:42b33951190c Date: 2013-07-10 14:13 +1000 http://bitbucket.org/pypy/pypy/changeset/42b33951190c/ Log: merged upstream diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -2,13 +2,13 @@ PyPy 2.1 beta 1 =============== -We're pleased to announce the first beta of the upcomming 2.1 release of PyPy. +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. This beta contains many bugfixes and improvements, numerous improvements to the numpy in pypy effort. The main feature being that the ARM processor support is not longer considered alpha level. We would like to thank the `Raspberry Pi Foundation`_ for supporting the work to finish PyPy's ARM support. 
-You can download the PyPy 2.0 beta 1 release here: +You can download the PyPy 2.1 beta 1 release here: http://pypy.org/download.html diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -79,6 +79,9 @@ # OS_RAW_MALLOC_VARSIZE_CHAR = 110 OS_RAW_FREE = 111 + # + OS_STR_COPY_TO_RAW = 112 + OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1665,12 +1665,14 @@ dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT, "stroruni.slice": EffectInfo.OS_STR_SLICE, "stroruni.equal": EffectInfo.OS_STR_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_STR_COPY_TO_RAW, } CHR = lltype.Char elif SoU.TO == rstr.UNICODE: dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT, "stroruni.slice": EffectInfo.OS_UNI_SLICE, "stroruni.equal": EffectInfo.OS_UNI_EQUAL, + "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar else: diff --git a/rpython/rtyper/lltypesystem/rffi.py 
b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -677,7 +677,8 @@ def make_string_mappings(strtype): if strtype is str: - from rpython.rtyper.lltypesystem.rstr import STR as STRTYPE + from rpython.rtyper.lltypesystem.rstr import (STR as STRTYPE, + copy_string_to_raw) from rpython.rtyper.annlowlevel import llstr as llstrtype from rpython.rtyper.annlowlevel import hlstr as hlstrtype TYPEP = CCHARP @@ -685,7 +686,9 @@ lastchar = '\x00' builder_class = StringBuilder else: - from rpython.rtyper.lltypesystem.rstr import UNICODE as STRTYPE + from rpython.rtyper.lltypesystem.rstr import ( + UNICODE as STRTYPE, + copy_unicode_to_raw as copy_string_to_raw) from rpython.rtyper.annlowlevel import llunicode as llstrtype from rpython.rtyper.annlowlevel import hlunicode as hlstrtype TYPEP = CWCHARP @@ -702,11 +705,9 @@ else: array = lltype.malloc(TYPEP.TO, len(s) + 1, flavor='raw', track_allocation=False) i = len(s) + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, 0, i) array[i] = lastchar - i -= 1 - while i >= 0: - array[i] = s[i] - i -= 1 return array str2charp._annenforceargs_ = [strtype, bool] @@ -739,14 +740,14 @@ string is already nonmovable. Must be followed by a free_nonmovingbuffer call. """ + lldata = llstrtype(data) if rgc.can_move(data): count = len(data) buf = lltype.malloc(TYPEP.TO, count, flavor='raw') - for i in range(count): - buf[i] = data[i] + copy_string_to_raw(lldata, buf, 0, count) return buf else: - data_start = cast_ptr_to_adr(llstrtype(data)) + \ + data_start = cast_ptr_to_adr(lldata) + \ offsetof(STRTYPE, 'chars') + itemoffsetof(STRTYPE.chars, 0) return cast(TYPEP, data_start) get_nonmovingbuffer._annenforceargs_ = [strtype] diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -49,16 +49,18 @@ def emptyunicodefun(): return emptyunicode -def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name): - def _str_ofs_src(item): - return (llmemory.offsetof(SRC_TP, 'chars') + - llmemory.itemoffsetof(SRC_TP.chars, 0) + +def _new_copy_contents_fun(STR_TP, CHAR_TP, name): + def _str_ofs(item): + return (llmemory.offsetof(STR_TP, 'chars') + + llmemory.itemoffsetof(STR_TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - def _str_ofs_dst(item): - return (llmemory.offsetof(DST_TP, 'chars') + - llmemory.itemoffsetof(DST_TP.chars, 0) + - llmemory.sizeof(CHAR_TP) * item) + @signature(types.any(), types.int(), returns=types.any()) + def _get_raw_buf(src, ofs): + assert typeOf(src).TO == STR_TP + assert ofs >= 0 + return llmemory.cast_ptr_to_adr(src) + _str_ofs(ofs) + _get_raw_buf._always_inline_ = True @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) @@ -71,22 +73,42 @@ # because it might move the strings. The keepalive_until_here() # are obscurely essential to make sure that the strings stay alive # longer than the raw_memcopy(). 
- assert typeOf(src).TO == SRC_TP - assert typeOf(dst).TO == DST_TP - assert srcstart >= 0 - assert dststart >= 0 assert length >= 0 - src = llmemory.cast_ptr_to_adr(src) + _str_ofs_src(srcstart) - dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs_dst(dststart) + # from here, no GC operations can happen + src = _get_raw_buf(src, srcstart) + dst = _get_raw_buf(dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section keepalive_until_here(src) keepalive_until_here(dst) copy_string_contents._always_inline_ = True - return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name) + copy_string_contents = func_with_new_name(copy_string_contents, + 'copy_%s_contents' % name) -copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') -copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, - 'unicode') + @jit.oopspec('stroruni.copy_string_to_raw(src, ptrdst, srcstart, length)') + def copy_string_to_raw(src, ptrdst, srcstart, length): + """ + Copies 'length' characters from the 'src' string to the 'ptrdst' + buffer, starting at position 'srcstart'. + 'ptrdst' must be a non-gc Array of Char. + """ + # xxx Warning: same note as above apply: don't do this at home + assert length >= 0 + # from here, no GC operations can happen + src = _get_raw_buf(src, srcstart) + adr = llmemory.cast_ptr_to_adr(ptrdst) + dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) + llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) + # end of "no GC" section + keepalive_until_here(src) + copy_string_to_raw._always_inline_ = True + copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name) + + return copy_string_to_raw, copy_string_contents + +copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') +copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, + UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() CONST_UNICODE_CACHE = WeakValueDictionary() diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -1118,6 +1118,26 @@ res = self.interpret(f, [5]) assert res == 0 + def test_copy_string_to_raw(self): + from rpython.rtyper.lltypesystem import lltype, llmemory + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + + def f(buf, n): + s = 'abc' * n + ll_s = llstr(s) + copy_string_to_raw(ll_s, buf, 0, n*3) + + TP = lltype.Array(lltype.Char) + array = lltype.malloc(TP, 12, flavor='raw') + f(array, 4) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') + + array = lltype.malloc(TP, 12, flavor='raw') + self.interpret(f, [array, 4]) + assert list(array) == list('abc'*4) + lltype.free(array, flavor='raw') class TestOOtype(BaseTestRstr, OORtypeMixin): pass From noreply at buildbot.pypy.org Wed Jul 10 08:59:36 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 10 Jul 2013 08:59:36 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: test current approach Message-ID: <20130710065936.52FAE1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r380:f6e27ee66d12 Date: 2013-07-10 07:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/f6e27ee66d12/ Log: test current approach diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -308,6 
+308,23 @@ check_free_old(p2) check_not_free(p3) # XXX replace with p1 +def test_prebuilt_version_2_copy_over_prebuilt(): + p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p2 = oalloc(HDR); make_public(p2) + p3 = oalloc(HDR); make_public(p3) + delegate(p1, p2) + delegate_original(p1, p2) + delegate(p2, p3) + delegate_original(p1, p3) + major_collect() + # XXX: current approach requires 2 major collections. + # the first to compress the path + # the second to do the copy + major_collect() + check_prebuilt(p1) + check_free_old(p2) + check_free_old(p3) + def test_prebuilt_version_to_protected(): p1 = lib.pseudoprebuilt(HDR, 42 + HDR) p2 = lib.stm_write_barrier(p1) @@ -321,6 +338,24 @@ check_prebuilt(p1) check_not_free(p2) # XXX replace with p1 +def test_prebuilt_version_to_protected_copy_over_prebuilt(): + py.test.skip("""current copy-over-prebuilt-original approach + does not work with public_prebuilt->protected""") + p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p2 = lib.stm_write_barrier(p1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + minor_collect() + p2 = lib.stm_read_barrier(p1) + assert p2 != p1 + minor_collect() + major_collect() + major_collect() + print classify(p2) + check_prebuilt(p1) + check_free_old(p2) + + def test_private(): p1 = nalloc(HDR) lib.stm_push_root(p1) From noreply at buildbot.pypy.org Wed Jul 10 08:59:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 10 Jul 2013 08:59:37 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: do a bit more Message-ID: <20130710065937.7F7081C1055@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r381:c1d5e6f19828 Date: 2013-07-10 08:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/c1d5e6f19828/ Log: do a bit more diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -367,68 +367,70 @@ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; gcptr obj; + for (; pobj != pend; pobj++) { obj = *pobj; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - //assert(IS_POINTER(obj->h_revision)); + + if (IS_POINTER(obj->h_revision)) { + visit((gcptr *)&obj->h_revision); + gcptr next = (gcptr)obj->h_revision; + + if (IS_POINTER((revision_t)next) /* needs to be an object */ + && (next->h_revision & 1) /* needs to be a head rev */ + && !(obj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) { + + /* XXX: WHY? 
*/ + assert(!(next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); + + assert(next->h_tid & GCFLAG_OLD); /* not moved already */ + assert(next->h_original == (revision_t)obj); + assert(next->h_tid & GCFLAG_PUBLIC); + assert(!(next->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(next->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(next->h_tid & GCFLAG_BACKUP_COPY)); + + /* copy next over obj but preserve possibly existing + pre-hash value and tid (prebuilt-flag) */ + revision_t pre_hash = obj->h_original; + revision_t old_tid = obj->h_tid; + memcpy(obj, next, stmgc_size(next)); + assert(!((obj->h_tid ^ old_tid) + & (GCFLAG_BACKUP_COPY | GCFLAG_STUB + | GCFLAG_PUBLIC | GCFLAG_HAS_ID + | GCFLAG_PRIVATE_FROM_PROTECTED))); + obj->h_original = pre_hash; + obj->h_tid = old_tid; + + fprintf(stdout, "copy %p over prebuilt %p\n", next, obj); + + /* Add this copied-over head revision to objects_to_trace + because it (next) was added by the preceeding visit() + but not at its new location (obj): */ + gcptrlist_insert(&objects_to_trace, obj); - gcptr next = (gcptr)obj->h_revision; - /* XXX: do better. visit obj first and then - copy over if possible: */ - if (!(obj->h_revision & 1) - && (next->h_revision & 1) - && !(next->h_tid & GCFLAG_VISITED) - && (next->h_tid & GCFLAG_OLD) - && !(next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) /* XXX */ - && !(obj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) { + /* For those visiting later: */ + next->h_revision = (revision_t)obj; - assert(next->h_original == (revision_t)obj); - assert(next->h_tid & GCFLAG_PUBLIC); - assert(!(next->h_tid & GCFLAG_STUB)); - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(next->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(next->h_tid & GCFLAG_BACKUP_COPY)); + /* mark somehow so that we can update pub_to_priv + for inevitable transactions and others ignore + it during tracing. Otherwise, inev transactions + will think 'next' is outdated. */ + next->h_tid &= ~GCFLAG_OLD; - - revision_t pre_hash = obj->h_original; - revision_t old_tid = obj->h_tid; - memcpy(obj, next, stmgc_size(next)); - assert(!((obj->h_tid ^ old_tid) - & (GCFLAG_BACKUP_COPY | GCFLAG_STUB - | GCFLAG_PUBLIC | GCFLAG_HAS_ID - | GCFLAG_PRIVATE_FROM_PROTECTED))); - obj->h_original = pre_hash; - obj->h_tid = old_tid; + } + /* obj does not need tracing if it can't + be reached from somewhere else*/ + } + else { + gcptrlist_insert(&objects_to_trace, obj); + } - fprintf(stdout, "copy %p over prebuilt %p\n", next, obj); - - /* will not be freed anyway and visit() only traces - head revision if not visited already */ - obj->h_tid &= ~GCFLAG_VISITED; - /* For those visiting later: - XXX: don't: they will think that they are outdated*/ - next->h_revision = (revision_t)obj; - //if (next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { - // may have already lost it - /* mark somehow so that we can update pub_to_priv - for inevitable transactions and others ignore - it during tracing. Otherwise, inev transactions - will think 'next' is outdated. */ - next->h_tid &= ~GCFLAG_OLD; - //} - } - else if (IS_POINTER(obj->h_revision)) { - visit((gcptr *)&obj->h_revision); - } - - // prebuilt originals will always be traced - // in visit_keep. And otherwise, they may - // not lose their pub_to_priv flag - // I think because transactions abort - // without clearing the flags. + /* prebuilt objects need to lose this flag. 
+ aborted transactions do not clear it themselves */ obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - gcptrlist_insert(&objects_to_trace, obj); } } diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -583,7 +583,9 @@ lib.stm_add_prebuilt_root(p1) def delegate_original(p1, p2): - assert p1.h_original == 0 + # no h_original or it is a prebuilt with a specified hash in h_original + assert (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL) + assert p1.h_tid & GCFLAG_OLD assert p2.h_original == 0 assert p1 != p2 p2.h_original = ffi.cast("revision_t", p1) diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -309,20 +309,23 @@ check_not_free(p3) # XXX replace with p1 def test_prebuilt_version_2_copy_over_prebuilt(): - p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p1 = lib.pseudoprebuilt_with_hash(HDR, 42 + HDR, 99) p2 = oalloc(HDR); make_public(p2) p3 = oalloc(HDR); make_public(p3) delegate(p1, p2) delegate_original(p1, p2) delegate(p2, p3) delegate_original(p1, p3) - major_collect() - # XXX: current approach requires 2 major collections. - # the first to compress the path - # the second to do the copy + # added by delegate, remove, otherwise + # major_collect will not copy over prebuilt p1: + p1.h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE major_collect() check_prebuilt(p1) + assert lib.stm_hash(p1) == 99 check_free_old(p2) + check_not_free(p3) + # XXX: takes another major collection to free p3 + major_collect() check_free_old(p3) def test_prebuilt_version_to_protected(): @@ -340,7 +343,7 @@ def test_prebuilt_version_to_protected_copy_over_prebuilt(): py.test.skip("""current copy-over-prebuilt-original approach - does not work with public_prebuilt->protected""") + does not work with public_prebuilt->stub->protected""") p1 = lib.pseudoprebuilt(HDR, 42 + HDR) p2 = lib.stm_write_barrier(p1) lib.stm_commit_transaction() From noreply at buildbot.pypy.org Wed Jul 10 10:21:28 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 10 Jul 2013 10:21:28 +0200 (CEST) Subject: [pypy-commit] pypy default: improve the error message Message-ID: <20130710082128.96EB51C0EF6@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65311:7e9ee52bd7d5 Date: 2013-07-10 10:20 +0200 http://bitbucket.org/pypy/pypy/changeset/7e9ee52bd7d5/ Log: improve the error message diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. 
" + "You might need to install libncurses5-dev or similar.") eci = guess_eci() From noreply at buildbot.pypy.org Wed Jul 10 12:53:20 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 10 Jul 2013 12:53:20 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 3fc9da9637c3 Message-ID: <20130710105321.020661C0EF6@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65312:aa6b3fad1ce2 Date: 2013-07-10 05:43 -0500 http://bitbucket.org/pypy/pypy/changeset/aa6b3fad1ce2/ Log: backout 3fc9da9637c3 diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,5 +16,7 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_ignore_collect(path, config): - return not cpu.startswith('arm') +def pytest_collect_directory(path, parent): + if not cpu.startswith('arm'): + py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) +pytest_collect_file = pytest_collect_directory From noreply at buildbot.pypy.org Wed Jul 10 14:05:26 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 10 Jul 2013 14:05:26 +0200 (CEST) Subject: [pypy-commit] pypy default: ignore guard_not_invalidated in some more places Message-ID: <20130710120526.6B4C91C3020@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65313:6227a8790103 Date: 2013-07-10 06:55 -0500 http://bitbucket.org/pypy/pypy/changeset/6227a8790103/ Log: ignore guard_not_invalidated in some more places diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -548,10 +548,10 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - guard_not_invalidated(descr=...) p12 = call(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) - """) + """, + include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -105,7 +105,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) f13 = getarrayitem_raw(i8, i6, descr=) @@ -117,7 +116,7 @@ i20 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_array_of_floats(self): try: @@ -142,7 +141,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) i13 = getarrayitem_raw(i8, i6, descr=) @@ -157,7 +155,7 @@ i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) 
- """) + """, ignore_ops=['guard_not_invalidated']) def test_zeropadded(self): From noreply at buildbot.pypy.org Wed Jul 10 14:47:57 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 10 Jul 2013 14:47:57 +0200 (CEST) Subject: [pypy-commit] pypy default: add some more highlights to the beta release notes Message-ID: <20130710124757.2907E1C0EF5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65314:a009475abbd3 Date: 2013-07-10 07:36 -0500 http://bitbucket.org/pypy/pypy/changeset/a009475abbd3/ Log: add some more highlights to the beta release notes diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -18,16 +18,20 @@ ========== * Bugfixes to the ARM JIT backend, so that ARM is now an officially - supported processor architecture. + supported processor architecture -* Various numpy improvements. +* Stacklet support on ARM -* Bugfixes to cffi and ctypes. +* Various numpy improvements + +* Bugfixes to cffi and ctypes * Bugfixes to the stacklet support * Improved logging performance +* Faster sets for objects + What is PyPy? ============= From noreply at buildbot.pypy.org Wed Jul 10 14:47:58 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 10 Jul 2013 14:47:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Reset whatsnew-head Message-ID: <20130710124758.C3D9F1C0EF5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65315:c34e4db0d094 Date: 2013-07-10 07:37 -0500 http://bitbucket.org/pypy/pypy/changeset/c34e4db0d094/ Log: Reset whatsnew-head diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,6 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put - -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) - -.. branch: remove-array-smm -Remove multimethods in the arraymodule - -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback - -.. branch: remove-set-smm -Remove multi-methods on sets - -.. branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. 
branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() From noreply at buildbot.pypy.org Wed Jul 10 16:06:04 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 10 Jul 2013 16:06:04 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test_rbytearray.py, which was broken by the improve-str2charp branch Message-ID: <20130710140604.B55CF1C3577@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65316:07d4ffd55e14 Date: 2013-07-10 14:04 +0200 http://bitbucket.org/pypy/pypy/changeset/07d4ffd55e14/ Log: fix test_rbytearray.py, which was broken by the improve-str2charp branch diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -8,13 +8,13 @@ def mallocbytearray(size): return lltype.malloc(BYTEARRAY, size) -copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, - lltype.Char, - 'bytearray') -copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, - BYTEARRAY, - lltype.Char, - 'bytearray_from_str') +_, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, + lltype.Char, + 'bytearray') +_, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, + BYTEARRAY, + lltype.Char, + 'bytearray_from_str') BYTEARRAY.become(lltype.GcStruct('rpy_bytearray', ('chars', lltype.Array(lltype.Char)), adtmeths={ diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -49,17 +49,19 @@ def emptyunicodefun(): return emptyunicode -def _new_copy_contents_fun(STR_TP, CHAR_TP, name): - def _str_ofs(item): - return (llmemory.offsetof(STR_TP, 'chars') + - llmemory.itemoffsetof(STR_TP.chars, 0) + +def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name): + @specialize.arg(0) + def _str_ofs(TP, item): + return (llmemory.offsetof(TP, 'chars') + + llmemory.itemoffsetof(TP.chars, 0) + llmemory.sizeof(CHAR_TP) * item) - @signature(types.any(), types.int(), returns=types.any()) - def _get_raw_buf(src, ofs): - assert typeOf(src).TO == STR_TP + @signature(types.any(), types.any(), types.int(), returns=types.any()) + @specialize.arg(0) + def _get_raw_buf(TP, src, ofs): + assert typeOf(src).TO == TP assert ofs >= 0 - return llmemory.cast_ptr_to_adr(src) + _str_ofs(ofs) + return llmemory.cast_ptr_to_adr(src) + _str_ofs(TP, ofs) _get_raw_buf._always_inline_ = True @jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)') @@ -75,8 +77,8 @@ # longer than the raw_memcopy(). 
assert length >= 0 # from here, no GC operations can happen - src = _get_raw_buf(src, srcstart) - dst = _get_raw_buf(dst, dststart) + src = _get_raw_buf(SRC_TP, src, srcstart) + dst = _get_raw_buf(DST_TP, dst, dststart) llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length) # end of "no GC" section keepalive_until_here(src) @@ -95,7 +97,7 @@ # xxx Warning: same note as above apply: don't do this at home assert length >= 0 # from here, no GC operations can happen - src = _get_raw_buf(src, srcstart) + src = _get_raw_buf(SRC_TP, src, srcstart) adr = llmemory.cast_ptr_to_adr(ptrdst) dstbuf = adr + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0) llmemory.raw_memcopy(src, dstbuf, llmemory.sizeof(CHAR_TP) * length) @@ -106,8 +108,8 @@ return copy_string_to_raw, copy_string_contents -copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, Char, 'string') -copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, +copy_string_to_raw, copy_string_contents = _new_copy_contents_fun(STR, STR, Char, 'string') +copy_unicode_to_raw, copy_unicode_contents = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, 'unicode') CONST_STR_CACHE = WeakValueDictionary() From noreply at buildbot.pypy.org Wed Jul 10 16:06:06 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 10 Jul 2013 16:06:06 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130710140606.18D051C3619@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65317:ac701565be25 Date: 2013-07-10 16:01 +0200 http://bitbucket.org/pypy/pypy/changeset/ac701565be25/ Log: merge heads diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -18,16 +18,20 @@ ========== * Bugfixes to the ARM JIT backend, so that ARM is now an officially - supported processor architecture. + supported processor architecture -* Various numpy improvements. +* Stacklet support on ARM -* Bugfixes to cffi and ctypes. +* Various numpy improvements + +* Bugfixes to cffi and ctypes * Bugfixes to the stacklet support * Improved logging performance +* Faster sets for objects + What is PyPy? ============= diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,6 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put - -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) - -.. branch: remove-array-smm -Remove multimethods in the arraymodule - -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback - -.. branch: remove-set-smm -Remove multi-methods on sets - -.. branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. 
branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -548,10 +548,10 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - guard_not_invalidated(descr=...) p12 = call(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) - """) + """, + include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -105,7 +105,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) f13 = getarrayitem_raw(i8, i6, descr=) @@ -117,7 +116,7 @@ i20 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_array_of_floats(self): try: @@ -142,7 +141,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) i13 = getarrayitem_raw(i8, i6, descr=) @@ -157,7 +155,7 @@ i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_zeropadded(self): diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,5 +16,7 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_ignore_collect(path, config): - return not cpu.startswith('arm') +def pytest_collect_directory(path, parent): + if not cpu.startswith('arm'): + py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) +pytest_collect_file = pytest_collect_directory From noreply at buildbot.pypy.org Wed Jul 10 16:06:07 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 10 Jul 2013 16:06:07 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20130710140607.5800B1C3577@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r65318:5e3c9717930a Date: 2013-07-10 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/5e3c9717930a/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,10 @@ .. this is a revision shortly after release-2.1-beta .. startrev: 4eb52818e7c0 +.. 
branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib + +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer From noreply at buildbot.pypy.org Wed Jul 10 17:21:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 10 Jul 2013 17:21:50 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: it seems this is enough to enable fastpath for ['self', 'space'] unwrap_spec Message-ID: <20130710152150.1911B1C1055@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.1.x Changeset: r65319:9baca13ddc39 Date: 2013-07-09 15:04 +0200 http://bitbucket.org/pypy/pypy/changeset/9baca13ddc39/ Log: it seems this is enough to enable fastpath for ['self', 'space'] unwrap_spec diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") From noreply at buildbot.pypy.org Wed Jul 10 17:21:51 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 10 Jul 2013 17:21:51 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: ignore guard_not_invalidated in some more places Message-ID: <20130710152151.466E01C1509@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65320:daf1b0412bfb Date: 2013-07-10 06:55 -0500 http://bitbucket.org/pypy/pypy/changeset/daf1b0412bfb/ Log: ignore guard_not_invalidated in some more places diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -548,10 +548,10 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - guard_not_invalidated(descr=...) p12 = call(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) - """) + """, + include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -105,7 +105,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) f13 = getarrayitem_raw(i8, i6, descr=) @@ -117,7 +116,7 @@ i20 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_array_of_floats(self): try: @@ -142,7 +141,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) i13 = getarrayitem_raw(i8, i6, descr=) @@ -157,7 +155,7 @@ i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) 
- """) + """, ignore_ops=['guard_not_invalidated']) def test_zeropadded(self): From noreply at buildbot.pypy.org Wed Jul 10 18:20:39 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 10 Jul 2013 18:20:39 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove all dependencies on ootypesystem from rpython/jit/ Message-ID: <20130710162039.98FD41C0325@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65321:9ffbd90b31dd Date: 2013-07-10 03:58 +0200 http://bitbucket.org/pypy/pypy/changeset/9ffbd90b31dd/ Log: Remove all dependencies on ootypesystem from rpython/jit/ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,7 +1,6 @@ import py, random from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -1,7 +1,6 @@ import py, sys from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.jit.backend.llgraph import runner from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats from rpython.jit.metainterp.warmstate import unspecialize_value diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,7 +14,6 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper.ootypesystem import ootype class BasicTests: @@ -1218,8 +1217,6 @@ res = self.interp_operations(fn, [1]) assert res == 1 self.check_operations_history(guard_class=0) - if self.type_system == 'ootype': - self.check_operations_history(instanceof=0) def test_r_dict(self): from rpython.rlib.objectmodel import r_dict diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -5,7 +5,6 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rtyper.lltypesystem import lltype, rclass, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.ootypesystem import ootype from rpython.jit.codewriter import heaptracker class VirtualTests: @@ -1277,13 +1276,6 @@ return lltype.malloc(NODE) - - -OONODE = ootype.Instance('NODE', ootype.ROOT, {}) -OONODE._add_fields({'value': ootype.Signed, - 'floatval' : ootype.Float, - 'extra': ootype.Signed}) - # ____________________________________________________________ # Run 3: all the tests use lltype.malloc to make a NODE2 # (same as Run 2 but it is part of the OBJECT hierarchy) diff --git a/rpython/jit/metainterp/typesystem.py b/rpython/jit/metainterp/typesystem.py --- a/rpython/jit/metainterp/typesystem.py +++ b/rpython/jit/metainterp/typesystem.py @@ -1,29 +1,18 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rclass -from 
rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, llstr, oostr +from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, llstr from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr -from rpython.rtyper.annlowlevel import cast_instance_to_base_obj from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker from rpython.rlib.objectmodel import r_dict, specialize def deref(T): - if isinstance(T, lltype.Ptr): - return T.TO - assert isinstance(T, ootype.OOType) - return T + assert isinstance(T, lltype.Ptr) + return T.TO + def fieldType(T, name): - if isinstance(T, lltype.Struct): - return getattr(T, name) - elif isinstance(T, (ootype.Instance, ootype.Record)): -## if name == '__class__': -## # XXX hack hack hack -## return ootype.Class - _, FIELD = T._lookup_field(name) - return FIELD - else: - assert False + assert isinstance(T, lltype.Struct) + return getattr(T, name) def arrayItem(ARRAY): try: diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -4,7 +4,6 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper import rvirtualizable2 from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.rclass import IR_IMMUTABLE_ARRAY, IR_IMMUTABLE @@ -14,9 +13,6 @@ def __init__(self, warmrunnerdesc, VTYPEPTR): self.warmrunnerdesc = warmrunnerdesc cpu = warmrunnerdesc.cpu - if cpu.ts.name == 'ootype': - import py - py.test.skip("ootype: fix virtualizables") self.cpu = cpu self.BoxArray = cpu.ts.BoxRef # @@ -45,8 +41,8 @@ for name in array_fields: ARRAYPTR = fieldType(VTYPE, name) ARRAY = deref(ARRAYPTR) - assert isinstance(ARRAYPTR, (lltype.Ptr, ootype.Array)) - assert isinstance(ARRAY, (lltype.GcArray, ootype.Array)) + assert isinstance(ARRAYPTR, lltype.Ptr) + assert isinstance(ARRAY, lltype.GcArray) ARRAYITEMTYPES.append(arrayItem(ARRAY)) self.array_descrs = [cpu.arraydescrof(deref(fieldType(VTYPE, name))) for name in array_fields] diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -12,7 +12,6 @@ from rpython.rtyper.annlowlevel import (hlstr, cast_base_ptr_to_instance, cast_object_to_ptr) from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi -from rpython.rtyper.ootypesystem import ootype # ____________________________________________________________ @@ -47,8 +46,6 @@ else: adr = llmemory.cast_ptr_to_adr(value) return heaptracker.adr2int(adr) - elif isinstance(lltype.typeOf(value), ootype.OOType): - return ootype.cast_to_object(value) elif isinstance(value, float): return longlong.getfloatstorage(value) else: @@ -63,8 +60,6 @@ return box.getref(TYPE) else: return llmemory.cast_adr_to_ptr(box.getaddr(), TYPE) - if isinstance(TYPE, ootype.OOType): - return box.getref(TYPE) if TYPE == lltype.Float: return box.getfloat() else: @@ -110,13 +105,11 @@ if isinstance(TYPE, lltype.Ptr): if TYPE.TO is rstr.STR or TYPE.TO is rstr.UNICODE: return rstr.LLHelpers.ll_streq(x, y) - if TYPE is ootype.String or TYPE is ootype.Unicode: - return x.ll_streq(y) return x == y @specialize.arg(0) def hash_whatever(TYPE, x): - # Hash of lltype or ootype object. + # Hash of lltype object. 
# Only supports strings, unicodes and regular instances, # as well as primitives that can meaningfully be cast to Signed. if isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc': @@ -127,13 +120,6 @@ return lltype.identityhash(x) else: return 0 - elif TYPE is ootype.String or TYPE is ootype.Unicode: - return x.ll_hash() - elif isinstance(TYPE, ootype.OOType): - if x: - return ootype.identityhash(x) - else: - return 0 else: return rffi.cast(lltype.Signed, x) @@ -548,8 +534,6 @@ else: if isinstance(BASEJITCELL, lltype.Ptr): cellref = lltype.malloc(BASEJITCELL.TO) - elif isinstance(BASEJITCELL, ootype.Instance): - cellref = ootype.new(BASEJITCELL) else: assert False, "no clue" lltohlhack[rtyper.type_system.deref(cellref)] = cell diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -25,11 +25,6 @@ from rpython.rtyper.lltypesystem import llmemory return adr2int(llmemory.cast_ptr_to_adr(obj)) - @staticmethod - def ootype_cast_to_object(obj): - from rpython.rtyper.ootypesystem import ootype - return ootype.cast_to_object(obj) - return LoopModel def get_mock_model(): From noreply at buildbot.pypy.org Wed Jul 10 18:23:31 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 10 Jul 2013 18:23:31 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: hg merge default Message-ID: <20130710162331.4EBC21C0325@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65322:fe974c808e9b Date: 2013-07-10 18:22 +0200 http://bitbucket.org/pypy/pypy/changeset/fe974c808e9b/ Log: hg merge default diff too long, truncating to 2000 out of 4822 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", 
"itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). + +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). 
------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,70 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. 
_`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture + +* Stacklet support on ARM + +* Various numpy improvements + +* Bugfixes to cffi and ctypes + +* Bugfixes to the stacklet support + +* Improved logging performance + +* Faster sets for objects + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,13 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) - -.. branch: remove-array-smm -Remove multimethods in the arraymodule - -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback - -.. branch: remove-set-smm -Remove multi-methods on sets - -.. branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. 
branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" - assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" + """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_function.py 
b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. 
+ builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. " + "You might need to install libncurses5-dev or similar.") eci = guess_eci() diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. + + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. 
+ """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for 
the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) + diff = rffi.cast(rffi.LONG, self.end_ptr[0]) - rffi.cast(rffi.LONG, start) + self.pos = i + diff + return self.space.wrap(floatval) + + def decode_int_slow(self, i): + start = i + if self.ll_chars[i] == '-': + i += 1 + while self.ll_chars[i].isdigit(): + i += 1 + s = self.getslice(start, i) + self.pos = i + return self.space.call_function(self.space.w_int, self.space.wrap(s)) + + def parse_integer(self, i): + "Parse a decimal number with an optional minus sign" + sign = 1 + # parse the sign + if self.ll_chars[i] == '-': + sign = -1 + i += 1 + elif self.ll_chars[i] == '+': + i += 1 + # + if self.ll_chars[i] == '0': + i += 1 + return i, False, 0 + + intval = 0 + start = i + while True: + ch = self.ll_chars[i] + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + i += 1 + else: + break + count = i - start + if count == 0: + self._raise("Expected digit at char %d", i) + # if the number has more digits than OVF_DIGITS, it might have + # overflowed + ovf_maybe = (count >= OVF_DIGITS) + return i, ovf_maybe, sign * intval + parse_integer._always_inline_ = True + + def decode_array(self, i): + w_list = self.space.newlist([]) + start = i + count = 0 + i = self.skip_whitespace(start) + if self.ll_chars[i] == ']': + self.pos = i+1 + return w_list + # + while True: + w_item = self.decode_any(i) + i = self.pos + self.space.call_method(w_list, 'append', w_item) + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + i += 1 + if ch == ']': + self.pos = i + return w_list + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.pos) + + def decode_object(self, i): + start = i + w_dict = self.space.newdict() + # + i = self.skip_whitespace(i) + if self.ll_chars[i] == '}': + self.pos = i+1 + return w_dict + # + while True: + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any(i) + if self.last_type != TYPE_STRING: + self._raise("Key name must be string for object starting at char %d", start) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + if ch != ':': + self._raise("No ':' found at char %d", i) + i += 1 + i = self.skip_whitespace(i) + # + w_value = self.decode_any(i) + self.space.setitem(w_dict, w_name, w_value) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + i += 1 + if ch == '}': + self.pos = i + return w_dict + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.pos) + + + def decode_string(self, i): + start = i + bits = 0 + while True: + # this loop is a fast path for strings which do not contain escape + # characters + ch = self.ll_chars[i] + i += 1 + bits |= ord(ch) + if ch == '"': + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + else: + # 
ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + content_so_far = self.getslice(start, i-1) + self.pos = i-1 + return self.decode_string_escaped(start, content_so_far) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + + + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + else: + builder.append_multiple_char(ch, 1) # we should implement append_char + + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 + put = builder.append_multiple_char + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) + elif ch == 'u': + return self.decode_escape_sequence_unicode(i, builder) + else: + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i + + def decode_escape_sequence_unicode(self, i, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
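        # Worked example of the branch below, using the escape exercised by
        # test_unicode_surrogate_pair later in this same changeset: for the
        # input \ud834\udd20 the first four hex digits give val == 0xd834;
        # since 0xd834 & 0xfc00 == 0xd800 it is a high surrogate, so
        # decode_surrogate_pair() consumes the following \udd20 and the
        # combined code point is
        # 0x10000 + (((0xd834 - 0xd800) << 10) | (0xdd20 - 0xdc00)) == 0x1d120,
        # i.e. u'\U0001d120'.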
+ start = i + i += 4 + hexdigits = self.getslice(start, i) + try: + val = int(hexdigits, 16) + if val & 0xfc00 == 0xd800: + # surrogate pair + val = self.decode_surrogate_pair(i, val) + i += 6 + except ValueError: + self._raise("Invalid \uXXXX escape (char %d)", i-1) + return # help the annotator to know that we'll never go beyond + # this point + # + uchr = unichr(val) + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + return i + + def decode_surrogate_pair(self, i, highsurr): + if self.ll_chars[i] != '\\' or self.ll_chars[i+1] != 'u': + self._raise("Unpaired high surrogate at char %d", i) + i += 2 + hexdigits = self.getslice(i, i+4) + lowsurr = int(hexdigits, 16) # the possible ValueError is caugth by the caller + return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) + +def loads(space, w_s): + if space.isinstance_w(w_s, space.w_unicode): + raise OperationError(space.w_TypeError, + space.wrap("Expected utf8-encoded str, got unicode")) + s = space.str_w(w_s) + decoder = JSONDecoder(space, s) + try: + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if i < len(s): + start = i + end = len(s) - 1 + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res + finally: + decoder.close() diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): + return W_Unicode(x) + + def wrapint(self, x): + return W_Int(x) + + def wrapfloat(self, x): + return W_Float(x) + + def wrap(self, x): + if isinstance(x, int): + return W_Int(x) + elif isinstance(x, float): + return W_Float(x) 
+ ## elif isinstance(x, str): + ## assert False + else: + return W_Unicode(unicode(x)) + wrap._annspecialcase_ = "specialize:argtype(1)" + + +fakespace = FakeSpace() + +def myloads(msg): + return loads(fakespace, W_String(msg)) + + +def bench(title, N, fn, arg): + a = time.clock() + for i in range(N): + res = fn(arg) + b = time.clock() + print title, (b-a) / N * 1000 + +def entry_point(argv): + if len(argv) != 3: + print 'Usage: %s FILE n' % argv[0] + return 1 + filename = argv[1] + N = int(argv[2]) + f = open_file_as_stream(filename) + msg = f.readall() + + try: + bench('loads ', N, myloads, msg) + except OperationError, e: + print 'Error', e._compute_value(fakespace) + + return 0 + +# _____ Define and setup target ___ + +def target(*args): + return entry_point, None + +if __name__ == '__main__': + entry_point(sys.argv) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -0,0 +1,188 @@ +# -*- encoding: utf-8 -*- +import py +from pypy.module._pypyjson.interp_decoder import JSONDecoder + +def test_skip_whitespace(): + s = ' hello ' + dec = JSONDecoder('fake space', s) + assert dec.pos == 0 + assert dec.skip_whitespace(0) == 3 + assert dec.skip_whitespace(3) == 3 + assert dec.skip_whitespace(8) == len(s) + dec.close() + + + +class AppTest(object): + spaceconfig = {"objspace.usemodules._pypyjson": True} + + def test_raise_on_unicode(self): + import _pypyjson + raises(TypeError, _pypyjson.loads, u"42") + + + def test_decode_constants(self): + import _pypyjson + assert _pypyjson.loads('null') is None + raises(ValueError, _pypyjson.loads, 'nul') + raises(ValueError, _pypyjson.loads, 'nu') + raises(ValueError, _pypyjson.loads, 'n') + raises(ValueError, _pypyjson.loads, 'nuXX') + # + assert _pypyjson.loads('true') is True + raises(ValueError, _pypyjson.loads, 'tru') + raises(ValueError, _pypyjson.loads, 'tr') + raises(ValueError, _pypyjson.loads, 't') + raises(ValueError, _pypyjson.loads, 'trXX') + # + assert _pypyjson.loads('false') is False + raises(ValueError, _pypyjson.loads, 'fals') + raises(ValueError, _pypyjson.loads, 'fal') + raises(ValueError, _pypyjson.loads, 'fa') + raises(ValueError, _pypyjson.loads, 'f') + raises(ValueError, _pypyjson.loads, 'falXX') + + + def test_decode_string(self): + import _pypyjson + res = _pypyjson.loads('"hello"') + assert res == u'hello' + assert type(res) is unicode + + def test_decode_string_utf8(self): + import _pypyjson + s = u'àèìòù' + res = _pypyjson.loads('"%s"' % s.encode('utf-8')) + assert res == s + + def test_skip_whitespace(self): + import _pypyjson + s = ' "hello" ' + assert _pypyjson.loads(s) == u'hello' + s = ' "hello" extra' + raises(ValueError, "_pypyjson.loads(s)") + + def test_unterminated_string(self): + import _pypyjson + s = '"hello' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence(self): + import _pypyjson + assert _pypyjson.loads(r'"\\"') == u'\\' + assert _pypyjson.loads(r'"\""') == u'"' + assert _pypyjson.loads(r'"\/"') == u'/' + assert _pypyjson.loads(r'"\b"') == u'\b' + assert _pypyjson.loads(r'"\f"') == u'\f' + assert _pypyjson.loads(r'"\n"') == u'\n' + assert _pypyjson.loads(r'"\r"') == u'\r' + assert _pypyjson.loads(r'"\t"') == u'\t' + + def test_escape_sequence_in_the_middle(self): + import _pypyjson + s = r'"hello\nworld"' + assert _pypyjson.loads(s) == "hello\nworld" + + def test_unterminated_string_after_escape_sequence(self): + import 
_pypyjson + s = r'"hello\nworld' # missing the trailing " + raises(ValueError, "_pypyjson.loads(s)") + + def test_escape_sequence_unicode(self): + import _pypyjson + s = r'"\u1234"' + assert _pypyjson.loads(s) == u'\u1234' + + def test_invalid_utf_8(self): + import _pypyjson + s = '"\xe0"' # this is an invalid UTF8 sequence inside a string + raises(UnicodeDecodeError, "_pypyjson.loads(s)") + + def test_decode_numeric(self): + import sys + import _pypyjson + def check(s, val): + res = _pypyjson.loads(s) + assert type(res) is type(val) + assert res == val + # + check('42', 42) + check('-42', -42) + check('42.123', 42.123) + check('42E0', 42.0) + check('42E3', 42000.0) + check('42E-1', 4.2) + check('42E+1', 420.0) + check('42.123E3', 42123.0) + check('0', 0) + check('-0', 0) + check('0.123', 0.123) + check('0E3', 0.0) + check('5E0001', 50.0) + check(str(1 << 32), 1 << 32) + check(str(1 << 64), 1 << 64) + # + x = str(sys.maxint+1) + '.123' + check(x, float(x)) + x = str(sys.maxint+1) + 'E1' + check(x, float(x)) + x = str(sys.maxint+1) + 'E-1' + check(x, float(x)) + # + check('1E400', float('inf')) + ## # these are non-standard but supported by CPython json + check('Infinity', float('inf')) + check('-Infinity', float('-inf')) + + def test_nan(self): + import math + import _pypyjson + res = _pypyjson.loads('NaN') + assert math.isnan(res) + + def test_decode_numeric_invalid(self): + import _pypyjson + def error(s): + raises(ValueError, _pypyjson.loads, s) + # + error(' 42 abc') + error('.123') + error('+123') + error('12.') + error('12.-3') + error('12E') + error('12E-') + error('0123') # numbers can't start with 0 + + def test_decode_object(self): + import _pypyjson + assert _pypyjson.loads('{}') == {} + assert _pypyjson.loads('{ }') == {} + # + s = '{"hello": "world", "aaa": "bbb"}' + assert _pypyjson.loads(s) == {'hello': 'world', + 'aaa': 'bbb'} + raises(ValueError, _pypyjson.loads, '{"key"') + raises(ValueError, _pypyjson.loads, '{"key": 42') + + def test_decode_object_nonstring_key(self): + import _pypyjson + raises(ValueError, "_pypyjson.loads('{42: 43}')") + + def test_decode_array(self): + import _pypyjson + assert _pypyjson.loads('[]') == [] + assert _pypyjson.loads('[ ]') == [] + assert _pypyjson.loads('[1]') == [1] + assert _pypyjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_pypyjson.loads('[1: 2]')") + raises(ValueError, "_pypyjson.loads('[1, 2')") + raises(ValueError, """_pypyjson.loads('["extra comma",]')""") + + def test_unicode_surrogate_pair(self): + import _pypyjson + expected = u'z\U0001d120x' + res = _pypyjson.loads('"z\\ud834\\udd20x"') + assert res == expected + + diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -86,20 +86,20 @@ (not _MS_WINDOWS or fieldsize * 8 == last_size) and fieldsize * 8 <= last_size and bitoffset + bitsize <= last_size): - # continue bit field - field_type = CONT_BITFIELD + # continue bit field + field_type = CONT_BITFIELD elif (not _MS_WINDOWS and last_size and # we have a bitfield open fieldsize * 8 >= last_size and bitoffset + bitsize <= fieldsize * 8): - # expand bit field - field_type = EXPAND_BITFIELD + # expand bit field + field_type = EXPAND_BITFIELD else: - # start new bitfield - field_type = NEW_BITFIELD - has_bitfield = True - bitoffset = 0 - last_size = fieldsize * 8 + # start new bitfield + field_type = NEW_BITFIELD + has_bitfield = True + bitoffset = 0 + last_size = fieldsize * 8 if is_union: pos.append(0) diff --git 
a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/cppyy/bench/hsimple.py --- a/pypy/module/cppyy/bench/hsimple.py +++ b/pypy/module/cppyy/bench/hsimple.py @@ -37,7 +37,7 @@ import random if _reflex: - gROOT.SetBatch(True) + gROOT.SetBatch(True) # Create a new ROOT binary machine independent file. # Note that this file may contain any kind of ROOT objects, histograms, diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/cppyy/bench/hsimple_rflx.py --- a/pypy/module/cppyy/bench/hsimple_rflx.py +++ b/pypy/module/cppyy/bench/hsimple_rflx.py @@ -80,14 +80,14 @@ for i in xrange(2500000): # Generate random values. # px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) + px, py = random.Gaus(0, 1), random.Gaus(0, 1) # pt = (px*px + py*py)**0.5 - pt = math.sqrt(px*px + py*py) + pt = math.sqrt(px*px + py*py) # pt = (px*px + py*py) # random = rndm(1) # Fill histograms. - hpx.Fill(pt) + hpx.Fill(pt) # hpxpyFill( px, py ) # hprofFill( px, pz ) # ntupleFill( px, py, pz, random, i ) @@ -105,7 +105,7 @@ #gBenchmark.Show( 'hsimple' ) -hpx.Print() +hpx.Print() # Save all objects in this file. #hpx.SetFillColor( 0 ) diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -542,7 +542,7 @@ # TODO: get the capi-identify test selection right ... if self.capi_identity != 'CINT': # don't test anything for Reflex - return + return import cppyy diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -131,11 +131,11 @@ # TVectorF is a typedef of floats v = cppyy.gbl.TVectorF(N) for i in range(N): - v[i] = i*i + v[i] = i*i assert len(v) == N for j in v: - assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. + assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. 
class AppTestCINTTTREE: diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -97,9 +97,9 @@ assert hasattr(v, 'end' ) for i in range(self.N): - v.push_back(cppyy.gbl.just_a_class()) - v[i].m_i = i - assert v[i].m_i == i + v.push_back(cppyy.gbl.just_a_class()) + v[i].m_i = i + assert v[i].m_i == i assert len(v) == self.N v.destruct() @@ -332,7 +332,7 @@ a = std.list(int)() for arg in a: - pass + pass class AppTestSTLMAP: @@ -395,7 +395,7 @@ m = std.map(int, int)() for key, value in m: - pass + pass def test04_unsignedvalue_typemap_types(self): """Test assignability of maps with unsigned value types""" diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -225,9 +225,9 @@ if w_newpart is None or not PyString_Check(space, ref[0]) or \ not PyString_Check(space, w_newpart): - Py_DecRef(space, ref[0]) - ref[0] = lltype.nullptr(PyObject.TO) - return + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) Py_DecRef(space, ref[0]) diff --git a/pypy/module/oracle/interp_pool.py b/pypy/module/oracle/interp_pool.py --- a/pypy/module/oracle/interp_pool.py +++ b/pypy/module/oracle/interp_pool.py @@ -130,14 +130,14 @@ self.checkConnected(space) if __args__.keywords: - keywords = __args__.keywords + ["pool"] + keywords = __args__.keywords + ["pool"] else: - keywords = ["pool"] + keywords = ["pool"] if __args__.keywords_w: - keywords_w = __args__.keywords_w + [space.wrap(self)] + keywords_w = __args__.keywords_w + [space.wrap(self)] else: - keywords_w = [space.wrap(self)] - + keywords_w = [space.wrap(self)] + newargs = Arguments(space, __args__.arguments_w, keywords, diff --git a/pypy/module/parser/__init__.py b/pypy/module/parser/__init__.py --- a/pypy/module/parser/__init__.py +++ b/pypy/module/parser/__init__.py @@ -2,28 +2,27 @@ class Module(MixedModule): - """The builtin parser module.""" + """The builtin parser module.""" - applevel_name = 'parser' + applevel_name = 'parser' - appleveldefs = { - } + appleveldefs = { + } - interpleveldefs = { - '__name__' : '(space.wrap("parser"))', - '__doc__' : '(space.wrap("parser module"))', - - 'suite' : 'pyparser.suite', - 'expr' : 'pyparser.expr', - 'issuite' : 'pyparser.issuite', - 'isexpr' : 'pyparser.isexpr', - 'STType' : 'pyparser.W_STType', - 'ast2tuple' : 'pyparser.st2tuple', - 'st2tuple' : 'pyparser.st2tuple', - 'ast2list' : 'pyparser.st2list', - 'ast2tuple' : 'pyparser.st2tuple', - 'ASTType' : 'pyparser.W_STType', - 'compilest' : 'pyparser.compilest', - 'compileast' : 'pyparser.compilest', - 'ParserError' : 'space.new_exception_class("parser.ParserError")', - } + interpleveldefs = { + '__name__' : '(space.wrap("parser"))', + '__doc__' : '(space.wrap("parser module"))', + 'suite' : 'pyparser.suite', + 'expr' : 'pyparser.expr', + 'issuite' : 'pyparser.issuite', + 'isexpr' : 'pyparser.isexpr', + 'STType' : 'pyparser.W_STType', + 'ast2tuple' : 'pyparser.st2tuple', + 'st2tuple' : 'pyparser.st2tuple', + 'ast2list' : 'pyparser.st2list', + 'ast2tuple' : 'pyparser.st2tuple', + 'ASTType' : 'pyparser.W_STType', + 'compilest' : 'pyparser.compilest', + 'compileast' : 'pyparser.compilest', + 'ParserError' : 'space.new_exception_class("parser.ParserError")', + } diff --git a/pypy/module/parser/pyparser.py 
b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -75,7 +75,7 @@ info = pyparse.CompileInfo("", mode) parser = pyparse.PythonParser(space) try: - tree = parser.parse_source(source, info) + tree = parser.parse_source(source, info) except error.IndentationError, e: raise OperationError(space.w_IndentationError, e.wrap_info(space)) diff --git a/pypy/module/pyexpat/test/test_build.py b/pypy/module/pyexpat/test/test_build.py --- a/pypy/module/pyexpat/test/test_build.py +++ b/pypy/module/pyexpat/test/test_build.py @@ -12,7 +12,7 @@ py.test.skip("No module expat") try: - from pypy.module.pyexpat import interp_pyexpat + from pypy.module.pyexpat import interp_pyexpat except (ImportError, CompilationError): py.test.skip("Expat not installed") diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -63,7 +63,7 @@ offset = {} for i, op in enumerate(oplist): if i != 1: - offset[op] = i + offset[op] = i token = JitCellToken() token.number = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -131,23 +131,24 @@ def has_id(self, id): return id in self.ids - def _ops_for_chunk(self, chunk, include_debug_merge_points): + def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' or include_debug_merge_points: + if op.name != 'debug_merge_point' and \ + (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op - def _allops(self, include_debug_merge_points=False, opcode=None): + def _allops(self, opcode=None, include_guard_not_invalidated=True): opcode_name = opcode for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode_name is None or \ (opcode and opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: - for op in chunk.operations: - if op.name == 'label': - yield op + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) @@ -162,15 +163,15 @@ def print_ops(self, *args, **kwds): print self.format_ops(*args, **kwds) - def _ops_by_id(self, id, include_debug_merge_points=False, opcode=None): + def _ops_by_id(self, id, include_guard_not_invalidated=True, opcode=None): opcode_name = opcode target_opcodes = self.ids[id] - loop_ops = self.allops(include_debug_merge_points, opcode) + loop_ops = self.allops(opcode) for chunk in self.flatten_chunks(): opcode = chunk.getopcode() if opcode in target_opcodes and (opcode_name is None or opcode.__class__.__name__ == opcode_name): - for op in self._ops_for_chunk(chunk, include_debug_merge_points): + for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): if op in loop_ops: yield op diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -548,10 +548,10 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - guard_not_invalidated(descr=...) 
p12 = call(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) - """) + """, + include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -105,7 +105,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) f13 = getarrayitem_raw(i8, i6, descr=) @@ -117,7 +116,7 @@ i20 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_array_of_floats(self): try: @@ -142,7 +141,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) i13 = getarrayitem_raw(i8, i6, descr=) @@ -157,7 +155,7 @@ i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_zeropadded(self): diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -223,5 +223,5 @@ log = self.run(main, [1000]) assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) - ops = loop.ops_by_id('getitem') + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) assert log.opnames(ops) == [] diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, ConstPtr(ptr70), descr=) + p93 = call(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) 
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -209,12 +209,12 @@ tzname = ["", ""] if _WIN: - c_tzset() - timezone = c_get_timezone() - altzone = timezone - 3600 - daylight = c_get_daylight() - tzname_ptr = c_get_tzname() - tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) + c_tzset() + timezone = c_get_timezone() + altzone = timezone - 3600 + daylight = c_get_daylight() + tzname_ptr = c_get_tzname() + tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) if _POSIX: if _CYGWIN: diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -91,13 +91,13 @@ next(cur) def test_cursor_after_close(con): - cur = con.execute('select 1') - cur.close() - con.close() - pytest.raises(_sqlite3.ProgrammingError, "cur.close()") - # raises ProgrammingError because should check closed before check args - pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") - pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") + cur = con.execute('select 1') + cur.close() + con.close() + pytest.raises(_sqlite3.ProgrammingError, "cur.close()") + # raises ProgrammingError because should check closed before check args + pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") + pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") @pytest.mark.skipif("not hasattr(sys, 'pypy_translation_info')") def test_connection_del(tmpdir): diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -379,13 +379,13 @@ if _check_notimplemented(space, w_res): return w_res if w_right_impl is not None: - if space.is_w(w_obj3, space.w_None): - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) - else: - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, + if space.is_w(w_obj3, space.w_None): + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) + else: + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, w_obj3) - if _check_notimplemented(space, w_res): - return w_res + if _check_notimplemented(space, w_res): + return w_res raise OperationError(space.w_TypeError, space.wrap("operands do not support **")) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -708,7 +708,7 @@ return MapDictIteratorValues(self.space, self, w_dict) def iteritems(self, w_dict): return MapDictIteratorItems(self.space, self, w_dict) - + def materialize_r_dict(space, obj, dict_w): map = obj._get_mapdict_map() @@ -716,69 +716,69 @@ _become(obj, new_obj) class MapDictIteratorKeys(BaseKeyIterator): - def __init__(self, space, strategy, dictimplementation): - BaseKeyIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseKeyIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_key_entry(self): - implementation = 
self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr - return None + def next_key_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + w_attr = self.space.wrap(attr) + return w_attr + return None + class MapDictIteratorValues(BaseValueIterator): - def __init__(self, space, strategy, dictimplementation): - BaseValueIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseValueIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_value_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - return self.w_obj.getdictvalue(self.space, attr) - return None + def next_value_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] + return self.w_obj.getdictvalue(self.space, attr) + return None + class MapDictIteratorItems(BaseItemIterator): - def __init__(self, space, strategy, dictimplementation): - BaseItemIterator.__init__( - self, space, strategy, dictimplementation) - w_obj = strategy.unerase(dictimplementation.dstorage) - self.w_obj = w_obj - self.orig_map = self.curr_map = w_obj._get_mapdict_map() + def __init__(self, space, strategy, dictimplementation): + BaseItemIterator.__init__(self, space, strategy, dictimplementation) + w_obj = strategy.unerase(dictimplementation.dstorage) + self.w_obj = w_obj + self.orig_map = self.curr_map = w_obj._get_mapdict_map() - def next_item_entry(self): - implementation = self.dictimplementation - assert isinstance(implementation.strategy, MapDictStrategy) - if self.orig_map is not self.w_obj._get_mapdict_map(): - return None, None - if self.curr_map: - curr_map = self.curr_map.search(DICT) - if curr_map: - self.curr_map = curr_map.back - attr = curr_map.selector[0] - w_attr = self.space.wrap(attr) - return w_attr, self.w_obj.getdictvalue(self.space, attr) - return None, None + def next_item_entry(self): + implementation = self.dictimplementation + assert isinstance(implementation.strategy, MapDictStrategy) + if self.orig_map is not self.w_obj._get_mapdict_map(): + return None, None + if self.curr_map: + curr_map = self.curr_map.search(DICT) + if curr_map: + self.curr_map = curr_map.back + attr = curr_map.selector[0] 
+ w_attr = self.space.wrap(attr) + return w_attr, self.w_obj.getdictvalue(self.space, attr) + return None, None + # ____________________________________________________________ # Magic caching @@ -860,7 +860,7 @@ # selector = ("", INVALID) if w_descr is None: - selector = (name, DICT) #common case: no such attr in the class + selector = (name, DICT) # common case: no such attr in the class elif isinstance(w_descr, TypeCell): pass # we have a TypeCell in the class: give up elif space.is_data_descr(w_descr): @@ -890,7 +890,6 @@ LOAD_ATTR_slowpath._dont_inline_ = True def LOOKUP_METHOD_mapdict(f, nameindex, w_obj): - space = f.space pycode = f.getcode() entry = pycode._mapdict_caches[nameindex] if entry.is_valid_for_obj(w_obj): diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -539,11 +539,11 @@ if left: #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, u_self[lpos],w_chars) while lpos < rpos and u_self[lpos].isspace(): - lpos += 1 + lpos += 1 if right: while rpos > lpos and u_self[rpos - 1].isspace(): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return sliced(space, u_self, lpos, rpos, w_self) diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -24,150 +24,150 @@ """) def test_mix_classes(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 From noreply at buildbot.pypy.org Wed Jul 10 18:46:30 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 10 Jul 2013 18:46:30 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130710164630.86DCC1C35EB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65323:b738f4cb540d Date: 2013-07-10 09:45 -0700 http://bitbucket.org/pypy/pypy/changeset/b738f4cb540d/ Log: merge default diff too long, truncating to 2000 out of 3209 lines diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -18,16 +18,20 @@ ========== * Bugfixes to the ARM JIT backend, so that ARM is now an officially - supported processor architecture. + supported processor architecture -* Various numpy improvements. +* Stacklet support on ARM -* Bugfixes to cffi and ctypes. +* Various numpy improvements + +* Bugfixes to cffi and ctypes * Bugfixes to the stacklet support * Improved logging performance +* Faster sets for objects + What is PyPy? ============= diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,13 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) - -.. branch: remove-array-smm -Remove multimethods in the arraymodule - -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback - -.. branch: remove-set-smm -Remove multi-methods on sets - -.. 
branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -598,11 +598,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -72,7 +72,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -84,7 +84,7 @@ if n: x = n def f(y): - return x + y + return x + y return f empty_cell_1 = f(0).__closure__[0] diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -88,26 +88,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not 
e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. + builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. " + "You might need to install libncurses5-dev or similar.") eci = guess_eci() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -86,20 +86,20 @@ (not _MS_WINDOWS or fieldsize * 8 == last_size) and fieldsize * 8 <= last_size and bitoffset + bitsize <= last_size): - # continue bit field - field_type = CONT_BITFIELD + # continue bit field + field_type = CONT_BITFIELD elif (not _MS_WINDOWS and last_size and # we have a bitfield open fieldsize * 8 >= last_size and bitoffset + bitsize <= fieldsize * 8): - # expand bit field - field_type = EXPAND_BITFIELD + # expand bit field + field_type = EXPAND_BITFIELD else: - # start new bitfield - field_type = NEW_BITFIELD - has_bitfield = True - bitoffset = 0 - last_size = fieldsize * 8 + # start new bitfield + field_type = NEW_BITFIELD + has_bitfield = True + bitoffset = 0 + last_size = fieldsize * 8 if is_union: pos.append(0) diff --git a/pypy/module/cppyy/bench/hsimple.py b/pypy/module/cppyy/bench/hsimple.py --- a/pypy/module/cppyy/bench/hsimple.py +++ b/pypy/module/cppyy/bench/hsimple.py @@ -37,7 +37,7 @@ import random if _reflex: - gROOT.SetBatch(True) + gROOT.SetBatch(True) # Create a new ROOT binary machine independent file. # Note that this file may contain any kind of ROOT objects, histograms, diff --git a/pypy/module/cppyy/bench/hsimple_rflx.py b/pypy/module/cppyy/bench/hsimple_rflx.py --- a/pypy/module/cppyy/bench/hsimple_rflx.py +++ b/pypy/module/cppyy/bench/hsimple_rflx.py @@ -80,14 +80,14 @@ for i in xrange(2500000): # Generate random values. # px, py = random.gauss(0, 1), random.gauss(0, 1) - px, py = random.Gaus(0, 1), random.Gaus(0, 1) + px, py = random.Gaus(0, 1), random.Gaus(0, 1) # pt = (px*px + py*py)**0.5 - pt = math.sqrt(px*px + py*py) + pt = math.sqrt(px*px + py*py) # pt = (px*px + py*py) # random = rndm(1) # Fill histograms. - hpx.Fill(pt) + hpx.Fill(pt) # hpxpyFill( px, py ) # hprofFill( px, pz ) # ntupleFill( px, py, pz, random, i ) @@ -105,7 +105,7 @@ #gBenchmark.Show( 'hsimple' ) -hpx.Print() +hpx.Print() # Save all objects in this file. 
#hpx.SetFillColor( 0 ) diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -542,7 +542,7 @@ # TODO: get the capi-identify test selection right ... if self.capi_identity != 'CINT': # don't test anything for Reflex - return + return import cppyy diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -131,11 +131,11 @@ # TVectorF is a typedef of floats v = cppyy.gbl.TVectorF(N) for i in range(N): - v[i] = i*i + v[i] = i*i assert len(v) == N for j in v: - assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. + assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0. class AppTestCINTTTREE: diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -97,9 +97,9 @@ assert hasattr(v, 'end' ) for i in range(self.N): - v.push_back(cppyy.gbl.just_a_class()) - v[i].m_i = i - assert v[i].m_i == i + v.push_back(cppyy.gbl.just_a_class()) + v[i].m_i = i + assert v[i].m_i == i assert len(v) == self.N v.destruct() @@ -332,7 +332,7 @@ a = std.list(int)() for arg in a: - pass + pass class AppTestSTLMAP: @@ -395,7 +395,7 @@ m = std.map(int, int)() for key, value in m: - pass + pass def test04_unsignedvalue_typemap_types(self): """Test assignability of maps with unsigned value types""" diff --git a/pypy/module/oracle/interp_pool.py b/pypy/module/oracle/interp_pool.py --- a/pypy/module/oracle/interp_pool.py +++ b/pypy/module/oracle/interp_pool.py @@ -130,14 +130,14 @@ self.checkConnected(space) if __args__.keywords: - keywords = __args__.keywords + ["pool"] + keywords = __args__.keywords + ["pool"] else: - keywords = ["pool"] + keywords = ["pool"] if __args__.keywords_w: - keywords_w = __args__.keywords_w + [space.wrap(self)] + keywords_w = __args__.keywords_w + [space.wrap(self)] else: - keywords_w = [space.wrap(self)] - + keywords_w = [space.wrap(self)] + newargs = Arguments(space, __args__.arguments_w, keywords, diff --git a/pypy/module/parser/__init__.py b/pypy/module/parser/__init__.py --- a/pypy/module/parser/__init__.py +++ b/pypy/module/parser/__init__.py @@ -2,28 +2,27 @@ class Module(MixedModule): - """The builtin parser module.""" + """The builtin parser module.""" - applevel_name = 'parser' + applevel_name = 'parser' - appleveldefs = { - } + appleveldefs = { + } - interpleveldefs = { - '__name__' : '(space.wrap("parser"))', - '__doc__' : '(space.wrap("parser module"))', - - 'suite' : 'pyparser.suite', - 'expr' : 'pyparser.expr', - 'issuite' : 'pyparser.issuite', - 'isexpr' : 'pyparser.isexpr', - 'STType' : 'pyparser.W_STType', - 'ast2tuple' : 'pyparser.st2tuple', - 'st2tuple' : 'pyparser.st2tuple', - 'ast2list' : 'pyparser.st2list', - 'ast2tuple' : 'pyparser.st2tuple', - 'ASTType' : 'pyparser.W_STType', - 'compilest' : 'pyparser.compilest', - 'compileast' : 'pyparser.compilest', - 'ParserError' : 'space.new_exception_class("parser.ParserError")', - } + interpleveldefs = { + '__name__' : '(space.wrap("parser"))', + '__doc__' : '(space.wrap("parser module"))', + 'suite' : 'pyparser.suite', + 'expr' : 'pyparser.expr', + 'issuite' : 'pyparser.issuite', + 'isexpr' : 'pyparser.isexpr', + 'STType' : 'pyparser.W_STType', + 'ast2tuple' : 'pyparser.st2tuple', + 'st2tuple' : 'pyparser.st2tuple', + 'ast2list' : 'pyparser.st2list', 
+ 'ast2tuple' : 'pyparser.st2tuple', + 'ASTType' : 'pyparser.W_STType', + 'compilest' : 'pyparser.compilest', + 'compileast' : 'pyparser.compilest', + 'ParserError' : 'space.new_exception_class("parser.ParserError")', + } diff --git a/pypy/module/parser/pyparser.py b/pypy/module/parser/pyparser.py --- a/pypy/module/parser/pyparser.py +++ b/pypy/module/parser/pyparser.py @@ -75,7 +75,7 @@ info = pyparse.CompileInfo("", mode) parser = pyparse.PythonParser(space) try: - tree = parser.parse_source(source, info) + tree = parser.parse_source(source, info) except error.IndentationError, e: raise OperationError(space.w_IndentationError, e.wrap_info(space)) diff --git a/pypy/module/pyexpat/test/test_build.py b/pypy/module/pyexpat/test/test_build.py --- a/pypy/module/pyexpat/test/test_build.py +++ b/pypy/module/pyexpat/test/test_build.py @@ -12,7 +12,7 @@ py.test.skip("No module expat") try: - from pypy.module.pyexpat import interp_pyexpat + from pypy.module.pyexpat import interp_pyexpat except (ImportError, CompilationError): py.test.skip("Expat not installed") diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -63,7 +63,7 @@ offset = {} for i, op in enumerate(oplist): if i != 1: - offset[op] = i + offset[op] = i token = JitCellToken() token.number = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -146,9 +146,9 @@ for op in self._ops_for_chunk(chunk, include_guard_not_invalidated): yield op else: - for op in chunk.operations: - if op.name == 'label': - yield op + for op in chunk.operations: + if op.name == 'label': + yield op def allops(self, *args, **kwds): return list(self._allops(*args, **kwds)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -548,10 +548,10 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - guard_not_invalidated(descr=...) p12 = call(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) - """) + """, + include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -105,7 +105,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) f13 = getarrayitem_raw(i8, i6, descr=) @@ -117,7 +116,7 @@ i20 = int_add(i6, 1) --TICK-- jump(..., descr=...) - """) + """, ignore_ops=['guard_not_invalidated']) def test_array_of_floats(self): try: @@ -142,7 +141,6 @@ assert loop.match(""" i10 = int_lt(i6, 1000) guard_true(i10, descr=...) - guard_not_invalidated? i11 = int_lt(i6, i7) guard_true(i11, descr=...) i13 = getarrayitem_raw(i8, i6, descr=) @@ -157,7 +155,7 @@ i23 = int_add(i6, 1) --TICK-- jump(..., descr=...) 
- """) + """, ignore_ops=['guard_not_invalidated']) def test_zeropadded(self): diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -207,12 +207,12 @@ tzname = ["", ""] if _WIN: - c_tzset() - timezone = c_get_timezone() - altzone = timezone - 3600 - daylight = c_get_daylight() - tzname_ptr = c_get_tzname() - tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) + c_tzset() + timezone = c_get_timezone() + altzone = timezone - 3600 + daylight = c_get_daylight() + tzname_ptr = c_get_tzname() + tzname = rffi.charp2str(tzname_ptr[0]), rffi.charp2str(tzname_ptr[1]) if _POSIX: if _CYGWIN: diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -91,13 +91,13 @@ next(cur) def test_cursor_after_close(con): - cur = con.execute('select 1') - cur.close() - con.close() - pytest.raises(_sqlite3.ProgrammingError, "cur.close()") - # raises ProgrammingError because should check closed before check args - pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") - pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") + cur = con.execute('select 1') + cur.close() + con.close() + pytest.raises(_sqlite3.ProgrammingError, "cur.close()") + # raises ProgrammingError because should check closed before check args + pytest.raises(_sqlite3.ProgrammingError, "cur.execute(1,2,3,4,5)") + pytest.raises(_sqlite3.ProgrammingError, "cur.executemany(1,2,3,4,5)") @pytest.mark.skipif("not hasattr(sys, 'pypy_translation_info')") def test_connection_del(tmpdir): diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -354,13 +354,13 @@ if _check_notimplemented(space, w_res): return w_res if w_right_impl is not None: - if space.is_w(w_obj3, space.w_None): - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) - else: - w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, + if space.is_w(w_obj3, space.w_None): + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1) + else: + w_res = space.get_and_call_function(w_right_impl, w_obj2, w_obj1, w_obj3) - if _check_notimplemented(space, w_res): - return w_res + if _check_notimplemented(space, w_res): + return w_res raise OperationError(space.w_TypeError, space.wrap("operands do not support **")) diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -551,11 +551,11 @@ if left: #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, u_self[lpos],w_chars) while lpos < rpos and u_self[lpos].isspace(): - lpos += 1 + lpos += 1 if right: while rpos > lpos and u_self[rpos - 1].isspace(): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return sliced(space, u_self, lpos, rpos, w_self) diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -24,26 +24,26 @@ """) def test_mix_classes(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - class B(object): - def f(self): - return 43 - class C(object): - def f(self): - return 44 - l = [A(), B(), C()] * 
10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + i % 3 - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 15 - assert cache_counter[1] >= 3 # should be (27, 3) - assert sum(cache_counter) == 30 + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + class B(object): + def f(self): + return 43 + class C(object): + def f(self): + return 44 + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + i % 3 + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 15 + assert cache_counter[1] >= 3 # should be (27, 3) + assert sum(cache_counter) == 30 def test_change_methods(self): # this test fails because of the following line in typeobject.py:427 @@ -53,102 +53,110 @@ # calling space.str_w, which .encode('ascii') the string, thus # creating new strings all the time. The problem should be solved when # we implement proper unicode identifiers in py3k - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - l = [A()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + i - A.f = eval("lambda self: %s" % (42 + i + 1, )) - cache_counter = __pypy__.method_cache_counter("f") - # - # a bit of explanation about what's going on. (1) is the line "a.f()" - # and (2) is "A.f = ...". - # - # at line (1) we do the lookup on type(a).f - # - # at line (2) we do a setattr on A. However, descr_setattr does also a - # lookup of type(A).f i.e. type.f, to check if by chance 'f' is a data - # descriptor. - # - # At the first iteration: - # (1) is a miss because it's the first lookup of A.f. The result is cached - # - # (2) is a miss because it is the first lookup of type.f. The - # (non-existant) result is cached. The version of A changes, and 'f' - # is changed to be a cell object, so that subsequest assignments won't - # change the version of A - # - # At the second iteration: - # (1) is a miss because the version of A changed just before - # (2) is a hit, because type.f is cached. The version of A no longer changes - # - # At the third and subsequent iterations: - # (1) is a hit, because the version of A did not change - # (2) is a hit, see above - assert cache_counter == (17, 3) + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + l = [A()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + i + A.f = eval("lambda self: %s" % (42 + i + 1, )) + cache_counter = __pypy__.method_cache_counter("f") + # + # a bit of explanation about what's going on. (1) is the line "a.f()" + # and (2) is "A.f = ...". + # + # at line (1) we do the lookup on type(a).f + # + # at line (2) we do a setattr on A. However, descr_setattr does also a + # lookup of type(A).f i.e. type.f, to check if by chance 'f' is a data + # descriptor. + # + # At the first iteration: + # (1) is a miss because it's the first lookup of A.f. The result is cached + # + # (2) is a miss because it is the first lookup of type.f. The + # (non-existant) result is cached. The version of A changes, and 'f' + # is changed to be a cell object, so that subsequest assignments won't + # change the version of A + # + # At the second iteration: + # (1) is a miss because the version of A changed just before + # (2) is a hit, because type.f is cached. 
The version of A no longer changes + # + # At the third and subsequent iterations: + # (1) is a hit, because the version of A did not change + # (2) is a hit, see above + assert cache_counter == (17, 3) def test_subclasses(self): - @self.retry - def run(): - import __pypy__ - class A(object): - def f(self): - return 42 - class B(object): - def f(self): - return 43 - class C(A): - pass - l = [A(), B(), C()] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 + (i % 3 == 1) - cache_counter = __pypy__.method_cache_counter("f") - assert cache_counter[0] >= 15 - assert cache_counter[1] >= 3 # should be (27, 3) - assert sum(cache_counter) == 30 + @self.retry + def run(): + import __pypy__ + class A(object): + def f(self): + return 42 + class B(object): + def f(self): + return 43 + class C(A): + pass + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + (i % 3 == 1) + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 15 + assert cache_counter[1] >= 3 # should be (27, 3) + assert sum(cache_counter) == 30 def test_many_names(self): - @self.retry - def run(): - import __pypy__ - laste = None - for j in range(20): - class A(object): - foo = 5 - bar = 6 - baz = 7 - xyz = 8 - stuff = 9 - a = 10 - foobar = 11 + @self.retry + def run(): + import __pypy__ + laste = None + for j in range(20): + class A(object): + def f(self): + return 42 + class B(object): + def f(self): + return 43 + class C(A): + pass + l = [A(), B(), C()] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + (i % 3 == 1) + cache_counter = __pypy__.method_cache_counter("f") + assert cache_counter[0] >= 15 + assert cache_counter[1] >= 3 # should be (27, 3) + assert sum(cache_counter) == 30 - a = A() - names = [name for name in A.__dict__.keys() - if not name.startswith('_')] - names.sort() - names_repeated = names * 10 - result = [] - __pypy__.reset_method_cache_counter() - for name in names_repeated: - result.append(getattr(a, name)) - append_counter = __pypy__.method_cache_counter("append") - names_counters = [__pypy__.method_cache_counter(name) - for name in names] - try: - assert append_counter[0] >= 10 * len(names) - 1 - for name, count in zip(names, names_counters): - assert count == (9, 1), str((name, count)) - break - except AssertionError as e: - laste = e - else: - raise laste + a = A() + names = [name for name in A.__dict__.keys() + if not name.startswith('_')] + names.sort() + names_repeated = names * 10 + result = [] + __pypy__.reset_method_cache_counter() + for name in names_repeated: + result.append(getattr(a, name)) + append_counter = __pypy__.method_cache_counter("append") + names_counters = [__pypy__.method_cache_counter(name) + for name in names] + try: + assert append_counter[0] >= 10 * len(names) - 1 + for name, count in zip(names, names_counters): + assert count == (9, 1), str((name, count)) + break + except AssertionError as e: + laste = e + else: + raise laste def test_mutating_bases(self): class C(object): @@ -170,50 +178,50 @@ assert e.foo == 3 def test_custom_metaclass(self): - @self.retry - def run(): - import __pypy__ - for j in range(20): - class MetaA(type): - def __getattribute__(self, x): - return 1 - def f(self): - return 42 - A = type.__new__(MetaA, "A", (), {"f": f}) - l = [type.__getattribute__(A, "__new__")(A)] * 10 - __pypy__.reset_method_cache_counter() - for i, a in enumerate(l): - assert a.f() == 42 - cache_counter = 
__pypy__.method_cache_counter("f") - assert sum(cache_counter) == 10 - if cache_counter == (9, 1): - break - #else the moon is misaligned, try again - else: - raise AssertionError("cache_counter = %r" % (cache_counter,)) + @self.retry + def run(): + import __pypy__ + for j in range(20): + class MetaA(type): + def __getattribute__(self, x): + return 1 + def f(self): + return 42 + A = type.__new__(MetaA, "A", (), {"f": f}) + l = [type.__getattribute__(A, "__new__")(A)] * 10 + __pypy__.reset_method_cache_counter() + for i, a in enumerate(l): + assert a.f() == 42 + cache_counter = __pypy__.method_cache_counter("f") + assert sum(cache_counter) == 10 + if cache_counter == (9, 1): + break + #else the moon is misaligned, try again + else: + raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): - @self.retry - def run(): - import __pypy__ - class A(object): - x = 1 - y = 2 - __pypy__.reset_method_cache_counter() - a = A() - for i in range(100): - assert a.y == 2 - assert a.x == i + 1 - A.x += 1 - cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] >= 350 - assert cache_counter[1] >= 1 - assert sum(cache_counter) == 400 + @self.retry + def run(): + import __pypy__ + class A(object): + x = 1 + y = 2 + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + assert a.x == i + 1 + A.x += 1 + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] >= 350 + assert cache_counter[1] >= 1 + assert sum(cache_counter) == 400 - __pypy__.reset_method_cache_counter() - a = A() - for i in range(100): - assert a.y == 2 - setattr(a, "a%s" % i, i) - cache_counter = __pypy__.method_cache_counter("x") - assert cache_counter[0] == 0 # 0 hits, because all the attributes are new + __pypy__.reset_method_cache_counter() + a = A() + for i in range(100): + assert a.y == 2 + setattr(a, "a%s" % i, i) + cache_counter = __pypy__.method_cache_counter("x") + assert cache_counter[0] == 0 # 0 hits, because all the attributes are new diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -337,11 +337,11 @@ if left: while lpos < rpos and u_self[lpos] in u_chars: - lpos += 1 + lpos += 1 if right: while rpos > lpos and u_self[rpos - 1] in u_chars: - rpos -= 1 + rpos -= 1 assert rpos >= 0 result = u_self[lpos: rpos] @@ -356,11 +356,11 @@ if left: while lpos < rpos and _isspace(u_self[lpos]): - lpos += 1 + lpos += 1 if right: while rpos > lpos and _isspace(u_self[rpos - 1]): - rpos -= 1 + rpos -= 1 assert rpos >= 0 result = u_self[lpos: rpos] diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -306,12 +306,13 @@ raises(TypeError, operate, A()) def test_missing_getattribute(self): - class X(object): pass + class X(object): + pass class Y(X): - class __metaclass__(type): - def mro(cls): - return [cls, X] + class __metaclass__(type): + def mro(cls): + return [cls, X] x = X() x.__class__ = Y diff --git a/pypy/tool/pytest/result.py b/pypy/tool/pytest/result.py --- a/pypy/tool/pytest/result.py +++ b/pypy/tool/pytest/result.py @@ -1,53 +1,53 @@ -import sys +import sys import py import re -class Result(object): - def __init__(self, init=True): +class Result(object): + def __init__(self, init=True): self._headers = {} self._blocks = {} self._blocknames = [] - if init: - 
stdinit(self) + if init: + stdinit(self) - def __setitem__(self, name, value): - self._headers[name.lower()] = value + def __setitem__(self, name, value): + self._headers[name.lower()] = value - def __getitem__(self, name): + def __getitem__(self, name): return self._headers[name.lower()] - def get(self, name, default): - return self._headers.get(name, default) - - def __delitem__(self, name): + def get(self, name, default): + return self._headers.get(name, default) + + def __delitem__(self, name): del self._headers[name.lower()] - def items(self): + def items(self): return self._headers.items() - def addnamedtext(self, name, text): + def addnamedtext(self, name, text): assert isinstance(text, basestring) assert isinstance(name, str) - self._blocknames.append(name) - self._blocks[name] = text + self._blocknames.append(name) + self._blocks[name] = text - def getnamedtext(self, name): + def getnamedtext(self, name): return self._blocks[name] - def repr_short_error(self): - if not self.isok(): - if 'reportdiff' in self._blocks: + def repr_short_error(self): + if not self.isok(): + if 'reportdiff' in self._blocks: return "output comparison failed, see reportdiff" - else: - text = self.getnamedtext('stderr') + else: + text = self.getnamedtext('stderr') lines = text.strip().split('\n') - if lines: + if lines: return lines[-1] - def repr_mimemessage(self): - from email.MIMEMultipart import MIMEMultipart + def repr_mimemessage(self): + from email.MIMEMultipart import MIMEMultipart from email.MIMEText import MIMEText - + outer = MIMEMultipart() items = self._headers.items() items.sort() @@ -56,31 +56,31 @@ assert ':' not in name chars = map(ord, name) assert min(chars) >= 33 and max(chars) <= 126 - outer[name] = str(value) - if not isinstance(value, str): - typename = type(value).__name__ + outer[name] = str(value) + if not isinstance(value, str): + typename = type(value).__name__ assert typename in vars(py.std.__builtin__) - reprs[name] = typename + reprs[name] = typename - outer['_reprs'] = repr(reprs) - - for name in self._blocknames: + outer['_reprs'] = repr(reprs) + + for name in self._blocknames: text = self._blocks[name] m = MIMEText(text) m.add_header('Content-Disposition', 'attachment', filename=name) - outer.attach(m) - return outer + outer.attach(m) + return outer def grep_nr(self,text,section='stdout'): stdout = self._blocks[section] find = re.search('%s(?P\d+)'%text,stdout) - if find: + if find: return float(find.group('nr')) - return 0. + return 0. def ratio_of_passed(self): if self.isok(): - return 1. + return 1. elif self.istimeout(): return 0. 
else: @@ -88,16 +88,16 @@ if nr > 0: return (nr - (self.grep_nr('errors=') + self.grep_nr('failures=')))/nr else: - passed = self.grep_nr('TestFailed: ',section='stderr') - run = self.grep_nr('TestFailed: \d+/',section='stderr') - if run > 0: - return passed/run - else: - run = self.grep_nr('TestFailed: \d+ of ',section='stderr') - if run > 0 : - return (run-passed)/run - else: - return 0.0 + passed = self.grep_nr('TestFailed: ',section='stderr') + run = self.grep_nr('TestFailed: \d+/',section='stderr') + if run > 0: + return passed/run + else: + run = self.grep_nr('TestFailed: \d+ of ',section='stderr') + if run > 0 : + return (run-passed)/run + else: + return 0.0 def isok(self): return self['outcome'].lower() == 'ok' @@ -105,7 +105,7 @@ def iserror(self): return self['outcome'].lower()[:3] == 'err' or self['outcome'].lower() == 'fail' - def istimeout(self): + def istimeout(self): return self['outcome'].lower() == 't/o' # XXX backward compatibility @@ -114,7 +114,7 @@ return msg f = open(str(path), 'r') msg = f.read() - f.close() + f.close() for broken in ('exit status', 'cpu model', 'cpu mhz'): valid = broken.replace(' ','-') invalid = msg.find(broken+':') @@ -127,10 +127,10 @@ def sanitize_reprs(reprs): if 'exit status' in reprs: reprs['exit-status'] = reprs.pop('exit status') - -class ResultFromMime(Result): - def __init__(self, path): - super(ResultFromMime, self).__init__(init=False) + +class ResultFromMime(Result): + def __init__(self, path): + super(ResultFromMime, self).__init__(init=False) f = open(str(path), 'r') from email import message_from_file msg = message_from_file(f) @@ -142,48 +142,48 @@ self._reprs = eval(msg['_reprs']) del msg['_reprs'] sanitize_reprs(self._reprs) - for name, value in msg.items(): - if name in self._reprs: + for name, value in msg.items(): + if name in self._reprs: value = eval(value) # XXX security - self._headers[name] = value + self._headers[name] = value self.fspath = self['fspath'] - if self['platform'] == 'win32' and '\\' in self.fspath: + if self['platform'] == 'win32' and '\\' in self.fspath: self.testname = self.fspath.split('\\')[-1] - else: + else: self.testname = self.fspath.split('/')[-1] - #if sys.platform != 'win32' and '\\' in self.fspath: + #if sys.platform != 'win32' and '\\' in self.fspath: # self.fspath = py.path.local(self['fspath'].replace('\\' - self.path = path - - payload = msg.get_payload() - if payload: - for submsg in payload: + self.path = path + + payload = msg.get_payload() + if payload: + for submsg in payload: assert submsg.get_content_type() == 'text/plain' - fn = submsg.get_filename() + fn = submsg.get_filename() assert fn # XXX we need to deal better with encodings to # begin with content = submsg.get_payload() - for candidate in 'utf8', 'latin1': + for candidate in 'utf8', 'latin1': try: text = unicode(content, candidate) - except UnicodeDecodeError: + except UnicodeDecodeError: continue else: - unicode(content, candidate) - self.addnamedtext(fn, text) + unicode(content, candidate) + self.addnamedtext(fn, text) - def ismodifiedtest(self): - # XXX we need proper cross-platform paths! + def ismodifiedtest(self): + # XXX we need proper cross-platform paths! 
return 'modified' in self.fspath - def __repr__(self): - return '<%s (%s) %r rev=%s>' %(self.__class__.__name__, - self['outcome'], - self.fspath, + def __repr__(self): + return '<%s (%s) %r rev=%s>' %(self.__class__.__name__, + self['outcome'], + self.fspath, self['pypy-revision']) -def stdinit(result): +def stdinit(result): import getpass import socket try: @@ -192,24 +192,24 @@ username = 'unknown' userhost = '%s@%s' % (username, socket.gethostname()) result['testreport-version'] = "1.1.1" - result['userhost'] = userhost - result['platform'] = sys.platform - result['python-version-info'] = sys.version_info - info = try_getcpuinfo() + result['userhost'] = userhost + result['platform'] = sys.platform + result['python-version-info'] = sys.version_info + info = try_getcpuinfo() if info is not None: result['cpu-model'] = info.get('model name', "unknown") result['cpu-mhz'] = info.get('cpu mhz', 'unknown') # # # -def try_getcpuinfo(): - if sys.platform.startswith('linux'): +def try_getcpuinfo(): + if sys.platform.startswith('linux'): cpuinfopath = py.path.local('/proc/cpuinfo') - if cpuinfopath.check(file=1): + if cpuinfopath.check(file=1): d = {} - for line in cpuinfopath.readlines(): - if line.strip(): - name, value = line.split(':', 1) - name = name.strip().lower() - d[name] = value.strip() - return d + for line in cpuinfopath.readlines(): + if line.strip(): + name, value = line.split(':', 1) + name = name.strip().lower() + d[name] = value.strip() + return d diff --git a/pypy/tool/slaveproc.py b/pypy/tool/slaveproc.py --- a/pypy/tool/slaveproc.py +++ b/pypy/tool/slaveproc.py @@ -39,7 +39,7 @@ class SlaveProcess(object): _broken = False - + def __init__(self, slave_impl): if sys.platform == 'win32': unbuffered = '' @@ -58,7 +58,7 @@ def close(self): if not self._broken: - assert self.cmd(None) == 'done' + assert self.cmd(None) == 'done' self.exchg.forceclose() class Slave(object): @@ -70,7 +70,7 @@ exchg = Exchange(sys.stdin, sys.stdout) while True: try: - cmd = exchg.recv() + cmd = exchg.recv() except EOFError: # master died break if cmd is None: @@ -78,4 +78,3 @@ break result = self.do_cmd(cmd) exchg.send(result) - diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -177,8 +177,8 @@ r.const = isinstance(s_obj.const, typ) elif our_issubclass(s_obj.knowntype, typ): if not s_obj.can_be_none(): - r.const = True - elif not our_issubclass(typ, s_obj.knowntype): + r.const = True + elif not our_issubclass(typ, s_obj.knowntype): r.const = False elif s_obj.knowntype == int and typ == bool: # xxx this will explode in case of generalisation # from bool to int, notice that isinstance( , bool|int) @@ -207,12 +207,12 @@ r.const = hasattr(s_obj.const, s_attr.const) elif (isinstance(s_obj, SomePBC) and s_obj.getKind() is description.FrozenDesc): - answers = {} - for d in s_obj.descriptions: - answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue) - answers[answer] = True - if len(answers) == 1: - r.const, = answers + answers = {} + for d in s_obj.descriptions: + answer = (d.s_read_attribute(s_attr.const) != s_ImpossibleValue) + answers[answer] = True + if len(answers) == 1: + r.const, = answers return r ##def builtin_callable(s_obj): @@ -344,7 +344,7 @@ return SomeAddress() def unicodedata_decimal(s_uchr): - raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" + raise TypeError, "unicodedate.decimal() calls should not happen at interp-level" def test(*args): return s_Bool @@ 
-395,7 +395,7 @@ if hasattr(object.__init__, 'im_func'): BUILTIN_ANALYZERS[object.__init__.im_func] = object_init else: - BUILTIN_ANALYZERS[object.__init__] = object_init + BUILTIN_ANALYZERS[object.__init__] = object_init # import BUILTIN_ANALYZERS[__import__] = import_func @@ -549,12 +549,12 @@ return s_Bool def classof(i): - assert isinstance(i, SomeOOInstance) + assert isinstance(i, SomeOOInstance) return SomeOOClass(i.ootype) def subclassof(class1, class2): - assert isinstance(class1, SomeOOClass) - assert isinstance(class2, SomeOOClass) + assert isinstance(class1, SomeOOClass) + assert isinstance(class2, SomeOOClass) return s_Bool def runtimenew(c): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,14 +5,12 @@ from __future__ import absolute_import from types import MethodType -from rpython.annotator.model import \ - SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, \ - SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, \ - SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, \ - SomeTypedAddressAccess, SomeAddress, SomeType, \ - s_ImpossibleValue, s_Bool, s_None, \ - unionof, missing_operation, add_knowntypedata, HarmlesslyBlocked, \ - SomeWeakRef, SomeUnicodeString +from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, + SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, + SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, + SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? 
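# ----------------------------------------------------------------------
# Editorial aside (not part of the diff): the hunk below replaces the
# old comma form of the raise statement with the call form.  Both are
# equivalent on Python 2, but only the call form also parses under
# later Python versions, which is presumably why it is preferred here.
# A two-line demonstration, using the message string from the hunk:
def _old_style():
    raise Exception, 'type() called with more than one argument'    # Python 2 only

def _new_style():
    raise Exception('type() called with more than one argument')    # Python 2 and 3
# ----------------------------------------------------------------------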
@@ -39,7 +37,7 @@ def type(obj, *moreargs): if moreargs: - raise Exception, 'type() called with more than one argument' + raise Exception('type() called with more than one argument') r = SomeType() bk = getbookkeeper() op = bk._find_current_op(opname="type", arity=1, pos=0, s_type=obj) diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,5 +16,7 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_ignore_collect(path, config): - return not cpu.startswith('arm') +def pytest_collect_directory(path, parent): + if not cpu.startswith('arm'): + py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) +pytest_collect_file = pytest_collect_directory diff --git a/rpython/jit/backend/arm/test/test_instr_codebuilder.py b/rpython/jit/backend/arm/test/test_instr_codebuilder.py --- a/rpython/jit/backend/arm/test/test_instr_codebuilder.py +++ b/rpython/jit/backend/arm/test/test_instr_codebuilder.py @@ -76,16 +76,16 @@ self.assert_equal('ASR r7, r5, #24') def test_orr_rr_no_shift(self): - self.cb.ORR_rr(r.r0.value, r.r7.value,r.r12.value) + self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value) self.assert_equal('ORR r0, r7, r12') def test_orr_rr_lsl_8(self): - self.cb.ORR_rr(r.r0.value, r.r7.value,r.r12.value, 8) + self.cb.ORR_rr(r.r0.value, r.r7.value, r.r12.value, 8) self.assert_equal('ORR r0, r7, r12, lsl #8') def test_push_one_reg(self): if get_as_version() < (2, 23): - py.test.xfail("GNU as before version 2.23 generates encoding A1 for " + py.test.xfail("GNU as before version 2.23 generates encoding A1 for " "pushing only one register") self.cb.PUSH([r.r1.value]) self.assert_equal('PUSH {r1}') diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -374,7 +374,7 @@ nonconstbox = clonebox def getref_base(self): - return self.value + return self.value def getref(self, OBJ): return ootype.cast_from_object(OBJ, self.getref_base()) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -183,8 +183,8 @@ if snapshot is None: return lltype.nullptr(NUMBERING), {}, 0 if snapshot in self.numberings: - numb, liveboxes, v = self.numberings[snapshot] - return numb, liveboxes.copy(), v + numb, liveboxes, v = self.numberings[snapshot] + return numb, liveboxes.copy(), v numb1, liveboxes, v = self.number(optimizer, snapshot.prev) n = len(liveboxes) - v diff --git a/rpython/jit/metainterp/test/test_greenfield.py b/rpython/jit/metainterp/test/test_greenfield.py --- a/rpython/jit/metainterp/test/test_greenfield.py +++ b/rpython/jit/metainterp/test/test_greenfield.py @@ -56,5 +56,6 @@ class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): pass + class TestOOtypeGreenFieldsTests(GreenFieldsTests, OOJitMixin): - pass + pass diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -178,5 +178,6 @@ class TestLLtypeImmutableFieldsTests(ImmutableFieldsTests, LLJitMixin): pass + class TestOOtypeImmutableFieldsTests(ImmutableFieldsTests, OOJitMixin): - pass + pass diff --git a/rpython/jit/tl/test/test_tl.py b/rpython/jit/tl/test/test_tl.py --- a/rpython/jit/tl/test/test_tl.py +++ 
b/rpython/jit/tl/test/test_tl.py @@ -79,10 +79,10 @@ ] def test_ops(self): - for insn, pyop, values in self.ops: - for first, second in values: - code = [PUSH, first, PUSH, second, insn] - assert self.interp(list2bytecode(code)) == pyop(first, second) + for insn, pyop, values in self.ops: + for first, second in values: + code = [PUSH, first, PUSH, second, insn] + assert self.interp(list2bytecode(code)) == pyop(first, second) def test_branch_forward(self): diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -468,7 +468,7 @@ JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS def write_barrier(self, newvalue, addr_struct): - if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: + if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS: self.remember_young_pointer(addr_struct, newvalue) def _setup_wb(self): diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -80,10 +80,11 @@ self.ignore) def __getstate__(self): - return (self.token_regexs, self.names, self.ignore) + return (self.token_regexs, self.names, self.ignore) def __setstate__(self, args): - self.__init__(*args) + self.__init__(*args) + class DummyLexer(Lexer): def __init__(self, matcher, automaton, ignore): diff --git a/rpython/rlib/rope.py b/rpython/rlib/rope.py --- a/rpython/rlib/rope.py +++ b/rpython/rlib/rope.py @@ -79,7 +79,7 @@ def is_ascii(self): raise NotImplementedError("base class") - + def is_bytestring(self): raise NotImplementedError("base class") @@ -144,7 +144,7 @@ def __init__(self, s): assert isinstance(s, str) self.s = s - + def length(self): return len(self.s) @@ -245,7 +245,7 @@ def __init__(self, u): assert isinstance(u, unicode) self.u = u - + def length(self): return len(self.u) @@ -254,7 +254,7 @@ def is_ascii(self): return False # usually not - + def is_bytestring(self): return False @@ -415,7 +415,7 @@ def flatten_unicode(self): f = fringe(self) return u"".join([node.flatten_unicode() for node in f]) - + def hash_part(self): return self.additional_info().hash @@ -435,7 +435,7 @@ return self return rebalance([self], self.len) - + def _concat(self, other): if isinstance(other, LiteralNode): r = self.right @@ -946,7 +946,7 @@ self.index = 0 if start: self._advance_to(start) - + def _advance_to(self, index): self.index = self.iter._seekforward(index) self.node = self.iter.next() @@ -1106,7 +1106,7 @@ self.stack.pop() raise StopIteration - + def seekback(self, numchars): if numchars <= self.index: self.index -= numchars @@ -1148,7 +1148,7 @@ self.stop = self.start else: self.restart_positions = construct_restart_positions_node(sub) - + def next(self): if self.search_length == 0: if (self.stop - self.start) < 0: @@ -1257,20 +1257,20 @@ def strip(node, left=True, right=True, predicate=lambda i: chr(i).isspace(), *extraargs): length = node.length() - + lpos = 0 rpos = length - + if left: iter = ItemIterator(node) while lpos < rpos and predicate(iter.nextint(), *extraargs): - lpos += 1 - + lpos += 1 + if right: iter = ReverseItemIterator(node) while rpos > lpos and predicate(iter.nextint(), *extraargs): - rpos -= 1 - + rpos -= 1 + assert rpos >= lpos return getslice_one(node, lpos, rpos) strip._annspecialcase_ = "specialize:arg(3)" @@ -1501,10 +1501,10 @@ ch = ord(s[i]) i += 1 if (ch < 0x80): - # Encode ASCII + # Encode ASCII result.append(chr(ch)) continue - # Encode Latin-1 + # Encode Latin-1 result.append(chr((0xc0 | (ch >> 
6)))) result.append(chr((0x80 | (ch & 0x3f)))) return "".join(result) diff --git a/rpython/rlib/rstruct/ieee.py b/rpython/rlib/rstruct/ieee.py --- a/rpython/rlib/rstruct/ieee.py +++ b/rpython/rlib/rstruct/ieee.py @@ -163,7 +163,7 @@ # Raise on overflow (in some circumstances, may want to return # infinity instead). if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") + raise OverflowError("float too large to pack in this format") # check constraints if not objectmodel.we_are_translated(): @@ -219,7 +219,7 @@ # Raise on overflow (in some circumstances, may want to return # infinity instead). if exp >= MAX_EXP - MIN_EXP + 2: - raise OverflowError("float too large to pack in this format") + raise OverflowError("float too large to pack in this format") # check constraints if not objectmodel.we_are_translated(): diff --git a/rpython/rlib/test/test_rcomplex.py b/rpython/rlib/test/test_rcomplex.py --- a/rpython/rlib/test/test_rcomplex.py +++ b/rpython/rlib/test/test_rcomplex.py @@ -22,15 +22,15 @@ ((10, -3), (-5, 7), (15, -10)), ((42, 0.3), (42, 0.3), (0, 0)) ]: - assert c.c_sub(c1, c2) == result + assert c.c_sub(c1, c2) == result def test_mul(): - for c1, c2, result in [ + for c1, c2, result in [ ((0, 0), (0, 0), (0, 0)), ((1, 0), (2, 0), (2, 0)), ((0, 3), (0, 2), (-6, 0)), ((0, -3), (-5, 0), (0, 15)), - ]: + ]: assert c.c_mul(c1, c2) == result def test_div(): @@ -65,7 +65,7 @@ struct.pack('l',int(rhs_pieces[i]))) else: rhs_pieces[i] = float(rhs_pieces[i]) - #id, fn, arg1_real, arg1_imag arg2_real, arg2_imag = + #id, fn, arg1_real, arg1_imag arg2_real, arg2_imag = #exp_real, exp_imag = rhs_pieces[0], rhs_pieces[1] flags = rhs_pieces[2:] id_f, fn = lhs_pieces[:2] @@ -108,7 +108,7 @@ (args[0][0], args[0][1], args[1][0], args[1][1]) else: return '(complex(%r, %r))' % (args[0], args[1]) - + def rAssertAlmostEqual(a, b, rel_err = 2e-15, abs_err = 5e-323, msg=''): """Fail if the two floating-point numbers are not almost equal. diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -61,7 +61,7 @@ # Things with a tuple return type have a fake impl for RPython, check # to see if the method has one. if hasattr(oo_math, method_name): - oofake = getattr(oo_math, method_name) + oofake = getattr(oo_math, method_name) register_external(getattr(module, name), arg_types, return_type, export_name='ll_math.%s' % method_name, sandboxsafe=True, diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -718,305 +718,305 @@ return None def lltype2ctypes(llobj, normalize=True): - """Convert the lltype object 'llobj' to its ctypes equivalent. - 'normalize' should only be False in tests, where we want to - inspect the resulting ctypes object manually. - """ - with rlock: - if isinstance(llobj, lltype._uninitialized): - return uninitialized2ctypes(llobj.TYPE) - if isinstance(llobj, llmemory.AddressAsInt): - cobj = ctypes.cast(lltype2ctypes(llobj.adr), ctypes.c_void_p) - res = intmask(cobj.value) - _int2obj[res] = llobj.adr.ptr._obj - return res - if isinstance(llobj, llmemory.fakeaddress): - llobj = llobj.ptr or 0 + """Convert the lltype object 'llobj' to its ctypes equivalent. + 'normalize' should only be False in tests, where we want to + inspect the resulting ctypes object manually. 
+ """ + with rlock: + if isinstance(llobj, lltype._uninitialized): + return uninitialized2ctypes(llobj.TYPE) + if isinstance(llobj, llmemory.AddressAsInt): + cobj = ctypes.cast(lltype2ctypes(llobj.adr), ctypes.c_void_p) + res = intmask(cobj.value) + _int2obj[res] = llobj.adr.ptr._obj + return res + if isinstance(llobj, llmemory.fakeaddress): + llobj = llobj.ptr or 0 - T = lltype.typeOf(llobj) + T = lltype.typeOf(llobj) - if isinstance(T, lltype.Ptr): - if not llobj: # NULL pointer + if isinstance(T, lltype.Ptr): + if not llobj: # NULL pointer + if T == llmemory.GCREF: + return ctypes.c_void_p(0) + return get_ctypes_type(T)() + if T == llmemory.GCREF: - return ctypes.c_void_p(0) - return get_ctypes_type(T)() + if isinstance(llobj._obj, _llgcopaque): + return ctypes.c_void_p(llobj._obj.intval) + if isinstance(llobj._obj, int): # tagged pointer + return ctypes.c_void_p(llobj._obj) + container = llobj._obj.container + T = lltype.Ptr(lltype.typeOf(container)) + # otherwise it came from integer and we want a c_void_p with + # the same value + if getattr(container, 'llopaque', None): + try: + no = _opaque_objs_seen[container] + except KeyError: + no = len(_opaque_objs) + _opaque_objs.append(container) + _opaque_objs_seen[container] = no + return no * 2 + 1 + else: + container = llobj._obj + if isinstance(T.TO, lltype.FuncType): + # XXX a temporary workaround for comparison of lltype.FuncType + key = llobj._obj.__dict__.copy() + key['_TYPE'] = repr(key['_TYPE']) + items = key.items() + items.sort() + key = tuple(items) + if key in _all_callbacks: + return _all_callbacks[key] + v1voidlist = [(i, getattr(container, '_void' + str(i), None)) + for i in range(len(T.TO.ARGS)) + if T.TO.ARGS[i] is lltype.Void] + def callback_internal(*cargs): + cargs = list(cargs) + for v1 in v1voidlist: + cargs.insert(v1[0], v1[1]) + assert len(cargs) == len(T.TO.ARGS) + llargs = [] + for ARG, carg in zip(T.TO.ARGS, cargs): + if ARG is lltype.Void: + llargs.append(carg) + else: + llargs.append(ctypes2lltype(ARG, carg)) + if hasattr(container, 'graph'): + if LLInterpreter.current_interpreter is None: + raise AssertionError + llinterp = LLInterpreter.current_interpreter + try: + llres = llinterp.eval_graph(container.graph, llargs) + except LLException, lle: + llinterp._store_exception(lle) + return 0 + #except: + # import pdb + # pdb.set_trace() + else: + try: + llres = container._callable(*llargs) + except LLException, lle: + llinterp = LLInterpreter.current_interpreter + llinterp._store_exception(lle) + return 0 + assert lltype.typeOf(llres) == T.TO.RESULT + if T.TO.RESULT is lltype.Void: + return None + res = lltype2ctypes(llres) + if isinstance(T.TO.RESULT, lltype.Ptr): + _all_callbacks_results.append(res) + res = ctypes.cast(res, ctypes.c_void_p).value + if res is None: + return 0 + if T.TO.RESULT == lltype.SingleFloat: + res = res.value # baaaah, cannot return a c_float() + return res - if T == llmemory.GCREF: - if isinstance(llobj._obj, _llgcopaque): - return ctypes.c_void_p(llobj._obj.intval) - if isinstance(llobj._obj, int): # tagged pointer - return ctypes.c_void_p(llobj._obj) - container = llobj._obj.container - T = lltype.Ptr(lltype.typeOf(container)) - # otherwise it came from integer and we want a c_void_p with - # the same value - if getattr(container, 'llopaque', None): - try: - no = _opaque_objs_seen[container] - except KeyError: - no = len(_opaque_objs) - _opaque_objs.append(container) - _opaque_objs_seen[container] = no - return no * 2 + 1 - else: - container = llobj._obj - if isinstance(T.TO, 
lltype.FuncType): - # XXX a temporary workaround for comparison of lltype.FuncType - key = llobj._obj.__dict__.copy() - key['_TYPE'] = repr(key['_TYPE']) - items = key.items() - items.sort() - key = tuple(items) - if key in _all_callbacks: - return _all_callbacks[key] - v1voidlist = [(i, getattr(container, '_void' + str(i), None)) - for i in range(len(T.TO.ARGS)) - if T.TO.ARGS[i] is lltype.Void] - def callback_internal(*cargs): - cargs = list(cargs) - for v1 in v1voidlist: - cargs.insert(v1[0], v1[1]) - assert len(cargs) == len(T.TO.ARGS) - llargs = [] - for ARG, carg in zip(T.TO.ARGS, cargs): - if ARG is lltype.Void: - llargs.append(carg) - else: - llargs.append(ctypes2lltype(ARG, carg)) - if hasattr(container, 'graph'): - if LLInterpreter.current_interpreter is None: - raise AssertionError - llinterp = LLInterpreter.current_interpreter + def callback(*cargs): try: - llres = llinterp.eval_graph(container.graph, llargs) - except LLException, lle: - llinterp._store_exception(lle) - return 0 - #except: - # import pdb - # pdb.set_trace() + return callback_internal(*cargs) + except: + import sys + #if option.usepdb: + # import pdb; pdb.post_mortem(sys.exc_traceback) + global _callback_exc_info + _callback_exc_info = sys.exc_info() + raise + + if isinstance(T.TO.RESULT, lltype.Ptr): + TMod = lltype.Ptr(lltype.FuncType(T.TO.ARGS, + lltype.Signed)) + ctypes_func_type = get_ctypes_type(TMod) + res = ctypes_func_type(callback) + ctypes_func_type = get_ctypes_type(T) + res = ctypes.cast(res, ctypes_func_type) else: - try: - llres = container._callable(*llargs) - except LLException, lle: - llinterp = LLInterpreter.current_interpreter - llinterp._store_exception(lle) - return 0 - assert lltype.typeOf(llres) == T.TO.RESULT - if T.TO.RESULT is lltype.Void: - return None - res = lltype2ctypes(llres) - if isinstance(T.TO.RESULT, lltype.Ptr): - _all_callbacks_results.append(res) - res = ctypes.cast(res, ctypes.c_void_p).value - if res is None: - return 0 - if T.TO.RESULT == lltype.SingleFloat: - res = res.value # baaaah, cannot return a c_float() + ctypes_func_type = get_ctypes_type(T) + res = ctypes_func_type(callback) + _all_callbacks[key] = res + key2 = intmask(ctypes.cast(res, ctypes.c_void_p).value) + _int2obj[key2] = container return res From noreply at buildbot.pypy.org Wed Jul 10 18:50:21 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 10 Jul 2013 18:50:21 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: grrr grrr grrr. force_virtualizable can malloc. 2 weeks Message-ID: <20130710165021.65F361C3620@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65324:a04f2b2c5238 Date: 2013-07-10 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a04f2b2c5238/ Log: grrr grrr grrr. force_virtualizable can malloc. 
2 weeks diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -488,12 +488,12 @@ 'NEW_ARRAY/1d', 'NEWSTR/1', 'NEWUNICODE/1', + 'FORCE_VIRTUALIZABLE/1d', # forces a non-standard virtualizable '_MALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', - 'FORCE_VIRTUALIZABLE/1d', # forces a non-standard virtualizable # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- From noreply at buildbot.pypy.org Wed Jul 10 20:58:06 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 10 Jul 2013 20:58:06 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: ups Message-ID: <20130710185806.744341C1509@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65325:a83e379514aa Date: 2013-07-10 20:57 +0200 http://bitbucket.org/pypy/pypy/changeset/a83e379514aa/ Log: ups diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -158,7 +158,8 @@ return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST def can_malloc(self): - return self.is_call() or self.is_malloc() + return (rop._CANMALLOC_FIRST <= self.getopnum() <= rop._CANMALLOC_LAST + or self.is_call()) def is_call(self): return rop._CALL_FIRST <= self.getopnum() <= rop._CALL_LAST @@ -482,14 +483,16 @@ 'RAW_LOAD/2d', 'GETFIELD_GC/1d', 'GETFIELD_RAW/1d', + '_CANMALLOC_FIRST', '_MALLOC_FIRST', 'NEW/0d', 'NEW_WITH_VTABLE/1', 'NEW_ARRAY/1d', 'NEWSTR/1', 'NEWUNICODE/1', + '_MALLOC_LAST', 'FORCE_VIRTUALIZABLE/1d', # forces a non-standard virtualizable - '_MALLOC_LAST', + '_CANMALLOC_LAST' 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', From noreply at buildbot.pypy.org Wed Jul 10 22:06:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 10 Jul 2013 22:06:20 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: argh Message-ID: <20130710200620.D266C1C1055@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65326:93eb9d0127cf Date: 2013-07-10 22:05 +0200 http://bitbucket.org/pypy/pypy/changeset/93eb9d0127cf/ Log: argh diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -492,7 +492,7 @@ 'NEWUNICODE/1', '_MALLOC_LAST', 'FORCE_VIRTUALIZABLE/1d', # forces a non-standard virtualizable - '_CANMALLOC_LAST' + '_CANMALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', From noreply at buildbot.pypy.org Wed Jul 10 23:01:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 10 Jul 2013 23:01:32 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: Actually use the hint we developed Message-ID: <20130710210132.D586B1C1055@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65327:d529882ed496 Date: 2013-07-10 23:00 +0200 http://bitbucket.org/pypy/pypy/changeset/d529882ed496/ Log: Actually use the hint we developed diff --git a/pypy/interpreter/generator.py 
b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -95,6 +95,7 @@
                 self.frame = None
                 raise OperationError(space.w_StopIteration, space.w_None)
             else:
+                jit.hint(frame, force_virtualizable=True)
                 return w_result     # YIELDed
         finally:
             frame.f_backref = jit.vref_None
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2521,7 +2521,9 @@
         # xxx only write back the fields really modified
         vbox = self.virtualizable_boxes[-1]
         if vbox is not box:
-            return  # ignore the hint on non-standard virtualizable
+            # ignore the hint on non-standard virtualizable
+            # specifically, ignore it on a virtual
+            return
         for i in range(vinfo.num_static_extra_boxes):
             fieldbox = self.virtualizable_boxes[i]
             descr = vinfo.static_field_descrs[i]

From noreply at buildbot.pypy.org  Thu Jul 11 06:40:29 2013
From: noreply at buildbot.pypy.org (Raemi)
Date: Thu, 11 Jul 2013 06:40:29 +0200 (CEST)
Subject: [pypy-commit] stmgc default: fix writing to write-ready objects after a minor collection
Message-ID: <20130711044029.8A23E1C0325@cobra.cs.uni-duesseldorf.de>

Author: Remi Meier
Branch: 
Changeset: r382:191c168da60e
Date: 2013-07-11 06:40 +0200
http://bitbucket.org/pypy/stmgc/changeset/191c168da60e/

Log:	fix writing to write-ready objects after a minor collection

diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -125,6 +125,9 @@
 }
 
 /************************************************************/
+/* list for private/protected, old roots that need to be
+   kept in old_objects_to_trace */
+static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL};
 
 static inline gcptr create_old_object_copy(gcptr obj)
 {
@@ -204,6 +207,22 @@
                              (revision_t)END_MARKER_ON)) {
         /* 'item' is a regular, non-null pointer */
         visit_if_young(end);
+
+        /* if old, private or protected, this object needs to be
+           traced again in the next minor_collect if it is
+           currently in old_objects_to_trace. Because then
+           it may be seen as write-ready in the view of
+           someone:
+           pw = write_barrier(); push_root(pw);
+           minor_collect(); pw = pop_root(); // pw still write-ready
+        */
+        if (item && item->h_tid & GCFLAG_OLD
+            && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in
+                                                        obj_to_trace*/
+            && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED
+                || item->h_revision == stm_private_rev_num)) {
+            gcptrlist_insert(&private_or_protected_roots, item);
+        }
     }
     else if (item != NULL) {
         if (item == END_MARKER_OFF)
@@ -358,6 +377,19 @@
 
         stmgc_trace(obj, &visit_if_young);
     }
+
+    while (gcptrlist_size(&private_or_protected_roots) > 0) {
+        gcptr obj = gcptrlist_pop(&private_or_protected_roots);
+        /* if it has the write_barrier flag, clear it so that
+           it doesn't get inserted twice by a later write-barrier */
+        if (obj->h_tid & GCFLAG_WRITE_BARRIER) {
+            /* only insert those that were in old_obj_to_trace
+               and that we didn't insert already */
+            obj->h_tid &= ~GCFLAG_WRITE_BARRIER;
+            gcptrlist_insert(&d->old_objects_to_trace, obj);
+            dprintf(("re-add %p to old_objects_to_trace\n", obj));
+        }
+    }
 }
 
 static void fix_list_of_read_objects(struct tx_descriptor *d)
@@ -406,7 +438,7 @@
 
 static void teardown_minor_collect(struct tx_descriptor *d)
 {
-    assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
+    //assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
     assert(gcptrlist_size(&d->public_with_young_copy) == 0);
     assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
 
diff --git a/c4/test/test_et.py b/c4/test/test_et.py
--- a/c4/test/test_et.py
+++ b/c4/test/test_et.py
@@ -205,36 +205,52 @@
     assert list_of_read_objects() == [p2]
 
 def test_write_barrier_after_minor_collect():
-    # maybe should fail. not sure.
     p = oalloc_refs(1)
     pw = lib.stm_write_barrier(p)
 
     lib.stm_push_root(pw)
     minor_collect()
+    lib.stm_push_root(ffi.NULL)
+    minor_collect()
+    lib.stm_pop_root()
+    minor_collect()
     r = nalloc(HDR)
     pw = lib.stm_pop_root()
 
     assert pw.h_tid & GCFLAG_OLD
     rawsetptr(pw, 0, r)
-    # pw not in old_objects_to_trace. A
-    # repeated write_barrier before
-    # rawsetptr() would fix that
-
+    # pw needs to be readded to old_objects_to_trace
+    # before the next minor gc in order for this test to pass
     lib.stm_push_root(r)
     minor_collect()
+    minor_collect()
+    minor_collect()
+    q = nalloc(HDR)
     r2 = lib.stm_pop_root()
     check_nursery_free(r)
     pr = lib.stm_read_barrier(p)
     assert r != r2
-    # these will fail because pw/pr was
-    not traced in the last minor_collect,
-    because they were not registered in
-    old_objects_to_trace.
     assert getptr(pr, 0) != r
     assert getptr(pr, 0) == r2
 
+    # the following shouldn't be done
+    # because pw was not saved.
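# ----------------------------------------------------------------------
# Editorial illustration (not part of the changeset above): the sequence
# that r382 makes safe, spelled out with the same helpers used in
# c4/test/test_et.py.  A minimal sketch, assuming the usual test fixtures
# (lib, ffi, minor_collect, oalloc_refs, nalloc, rawsetptr, getptr, HDR)
# are available as in that file.
def sketch_write_ready_after_minor_collect():
    p = oalloc_refs(1)                 # an old object with one gc field
    pw = lib.stm_write_barrier(p)      # pw is now write-ready
    lib.stm_push_root(pw)
    minor_collect()                    # may drain old_objects_to_trace
    r = nalloc(HDR)                    # a young object
    pw = lib.stm_pop_root()            # pw is *still* write-ready, so...
    rawsetptr(pw, 0, r)                # ...a young pointer can be stored
                                       # without calling the barrier again
    lib.stm_push_root(r)
    minor_collect()                    # pw must be re-traced here, or the
    r2 = lib.stm_pop_root()            # moved copy of r would be missed
    pr = lib.stm_read_barrier(p)
    assert getptr(pr, 0) == r2         # holds only with the r382 fix
# ----------------------------------------------------------------------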
Just + # here to check that pw gets removed + # from old_objects_to_trace when not found + # on the root stack anymore + rawsetptr(pw, 0, q) + lib.stm_push_root(q) + minor_collect() + q2 = lib.stm_pop_root() + check_nursery_free(q) + pr = lib.stm_read_barrier(p) + assert q != q2 + assert getptr(pr, 0) == q + assert getptr(pr, 0) != q2 + + def test_id_young_to_old(): # move out of nursery with shadow original p = nalloc(HDR) diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_read1(ob); + _du_write1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,8 +85,6 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); - _du_write1(ob); - for (i=0; iob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; From noreply at buildbot.pypy.org Thu Jul 11 08:00:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 08:00:53 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: merge default Message-ID: <20130711060053.DD7351C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r383:ee69aa1b8ef3 Date: 2013-07-11 07:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/ee69aa1b8ef3/ Log: merge default diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -125,6 +125,9 @@ } /************************************************************/ +/* list for private/protected, old roots that need to be + kept in old_objects_to_trace */ +static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL}; static inline gcptr create_old_object_copy(gcptr obj) { @@ -204,6 +207,22 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); + + /* if old, private or protected, this object needs to be + traced again in the next minor_collect if it is + currently in old_objects_to_trace. 
Because then + it may be seen as write-ready in the view of + someone: + pw = write_barrier(); push_root(pw); + minor_collect(); pw = pop_root(); // pw still write-ready + */ + if (item && item->h_tid & GCFLAG_OLD + && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in + obj_to_trace*/ + && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED + || item->h_revision == stm_private_rev_num)) { + gcptrlist_insert(&private_or_protected_roots, item); + } } else if (item != NULL) { if (item == END_MARKER_OFF) @@ -358,6 +377,19 @@ stmgc_trace(obj, &visit_if_young); } + + while (gcptrlist_size(&private_or_protected_roots) > 0) { + gcptr obj = gcptrlist_pop(&private_or_protected_roots); + /* if it has the write_barrier flag, clear it so that + it doesn't get inserted twice by a later write-barrier */ + if (obj->h_tid & GCFLAG_WRITE_BARRIER) { + /* only insert those that were in old_obj_to_trace + and that we didn't insert already */ + obj->h_tid &= ~GCFLAG_WRITE_BARRIER; + gcptrlist_insert(&d->old_objects_to_trace, obj); + dprintf(("re-add %p to old_objects_to_trace\n", obj)); + } + } } static void fix_list_of_read_objects(struct tx_descriptor *d) @@ -410,7 +442,7 @@ static void teardown_minor_collect(struct tx_descriptor *d) { - assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + //assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -205,36 +205,52 @@ assert list_of_read_objects() == [p2] def test_write_barrier_after_minor_collect(): - # maybe should fail. not sure. p = oalloc_refs(1) pw = lib.stm_write_barrier(p) lib.stm_push_root(pw) minor_collect() + lib.stm_push_root(ffi.NULL) + minor_collect() + lib.stm_pop_root() + minor_collect() r = nalloc(HDR) pw = lib.stm_pop_root() assert pw.h_tid & GCFLAG_OLD rawsetptr(pw, 0, r) - # pw not in old_objects_to_trace. A - # repeated write_barrier before - # rawsetptr() would fix that - + # pw needs to be readded to old_objects_to_trace + # before the next minor gc in order for this test to pass lib.stm_push_root(r) minor_collect() + minor_collect() + minor_collect() + q = nalloc(HDR) r2 = lib.stm_pop_root() check_nursery_free(r) pr = lib.stm_read_barrier(p) assert r != r2 - # these will fail because pw/pr was - # not traced in the last minor_collect, - # because they were not registered in - # old_objects_to_trace. assert getptr(pr, 0) != r assert getptr(pr, 0) == r2 + # the following shouldn't be done + # because pw was not saved. Just + # here to check that pw gets removed + # from old_objects_to_trace when not found + # on the root stack anymore + rawsetptr(pw, 0, q) + lib.stm_push_root(q) + minor_collect() + q2 = lib.stm_pop_root() + check_nursery_free(q) + pr = lib.stm_read_barrier(p) + assert q != q2 + assert getptr(pr, 0) == q + assert getptr(pr, 0) != q2 + + def test_id_young_to_old(): # move out of nursery with shadow original p = nalloc(HDR) diff --git a/duhton/demo/trees.duh b/duhton/demo/trees.duh --- a/duhton/demo/trees.duh +++ b/duhton/demo/trees.duh @@ -1,15 +1,16 @@ (defun create-tree (n) - (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) - (if (== (len tree) 1) (get tree 0) - (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + (if (pair? 
tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 ) ) -(setq tree (create-tree 1024)) +(setq tree (create-tree 10)) (print (walk-tree tree)) (setq n 0) (while (< n 1000) diff --git a/duhton/demo/trees2.duh b/duhton/demo/trees2.duh --- a/duhton/demo/trees2.duh +++ b/duhton/demo/trees2.duh @@ -1,16 +1,18 @@ + (defun create-tree (n) - (if (< n 1) (list 1) (list (create-tree (/ n 2)) (create-tree (/ n 2)))) + (if (== n 0) 1 (cons (create-tree (- n 1)) (create-tree (- n 1)))) ) (defun walk-tree (tree) - (if (== (len tree) 1) (get tree 0) - (+ (walk-tree (get tree 0)) (walk-tree (get tree 1))) + (if (pair? tree) + (+ (walk-tree (car tree)) (walk-tree (cdr tree))) + 1 ) ) (defun lookup-tree () - (walk-tree (create-tree 1024)) + (walk-tree (create-tree 10)) ) (setq n 0) diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -527,6 +527,14 @@ return DuCons_Cdr(obj); } +DuObject *du_cons(DuObject *cons, DuObject *locals) +{ + DuObject *obj1, *obj2; + _du_getargs2("cons", cons, locals, &obj1, &obj2); + + return DuCons_New(obj1, obj2); +} + DuObject *du_not(DuObject *cons, DuObject *locals) { DuObject *obj; @@ -580,6 +588,13 @@ return DuInt_FromInt(res != NULL); } +DuObject *du_pair(DuObject *cons, DuObject *locals) +{ + DuObject *obj; + _du_getargs1("pair?", cons, locals, &obj); + return DuInt_FromInt(DuCons_Check(obj)); +} + DuObject *du_assert(DuObject *cons, DuObject *locals) { DuObject *obj; @@ -627,10 +642,12 @@ DuFrame_SetBuiltinMacro(Du_Globals, "defun", du_defun); DuFrame_SetBuiltinMacro(Du_Globals, "car", du_car); DuFrame_SetBuiltinMacro(Du_Globals, "cdr", du_cdr); + DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); DuFrame_SetBuiltinMacro(Du_Globals, "not", du_not); DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); + DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", Du_None); } diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_read1(ob); + _du_write1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,8 +85,6 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); - _du_write1(ob); - for (i=0; iob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; diff --git a/duhton/test/test_cons.py b/duhton/test/test_cons.py --- a/duhton/test/test_cons.py +++ b/duhton/test/test_cons.py @@ -5,6 +5,11 @@ assert run("(print ())") == "None\n" assert run("(print None)") == "None\n" assert run("(print (quote (1 2 3)))") == "( 1 2 3 )\n" + assert run("(print (cons 1 2))") == "( 1 . 2 )\n" + +def test_pair(): + assert run("(print (pair? 1))") == "0\n" + assert run("(print (pair? 
(cons 1 2)))") == "1\n" def test_car_cdr(): assert run("(print (car (quote (2 3))))") == "2\n" From noreply at buildbot.pypy.org Thu Jul 11 08:00:54 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 08:00:54 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: cleanups Message-ID: <20130711060054.EF3011C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r384:2e860d218333 Date: 2013-07-11 07:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/2e860d218333/ Log: cleanups diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -380,12 +380,14 @@ && (next->h_revision & 1) /* needs to be a head rev */ && !(obj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) { - /* XXX: WHY? */ + /* XXX: WHY never hit? */ assert(!(next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(next->h_tid & GCFLAG_OLD); /* not moved already */ assert(next->h_original == (revision_t)obj); - assert(next->h_tid & GCFLAG_PUBLIC); + assert(next->h_tid & GCFLAG_PUBLIC); /* no priv/prot! + otherwise we'd need to fix more lists + like old_objects_to_trace */ assert(!(next->h_tid & GCFLAG_STUB)); assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); assert(!(next->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -404,7 +406,7 @@ obj->h_original = pre_hash; obj->h_tid = old_tid; - fprintf(stdout, "copy %p over prebuilt %p\n", next, obj); + dprintf(stdout, "copy %p over prebuilt %p\n", next, obj); /* Add this copied-over head revision to objects_to_trace because it (next) was added by the preceeding visit() @@ -422,7 +424,7 @@ } /* obj does not need tracing if it can't - be reached from somewhere else*/ + be reached from somewhere else */ } else { gcptrlist_insert(&objects_to_trace, obj); @@ -457,6 +459,9 @@ static void mark_all_stack_roots(void) { struct tx_descriptor *d; + struct G2L new_public_to_private; + memset(&new_public_to_private, 0, sizeof(struct G2L)); + for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); @@ -469,39 +474,36 @@ /* the current transaction's private copies of public objects */ wlog_t *item; - if (1 || d->active == 2) { - /* inevitable transactions need to have their pub_to_priv - fixed. Otherwise, they'll think their objects got outdated */ - /* XXX: others too, but maybe not worth it */ - struct G2L new_public_to_private; - memset(&new_public_to_private, 0, sizeof(struct G2L)); - fprintf(stdout, "start fixup (%p):\n", d); - G2L_LOOP_FORWARD(d->public_to_private, item) { - gcptr R = item->addr; - gcptr L = item->val; - if (!(R->h_tid & GCFLAG_OLD)) { - /* R was copied over its original */ - gcptr new_R = (gcptr)R->h_original; - g2l_insert(&new_public_to_private, new_R, L); - G2L_LOOP_DELETE(item); - - if (L->h_revision == (revision_t)R) { - L->h_revision = (revision_t)new_R; - fprintf(stdout," fixup %p to %p <-> %p\n", R, new_R, L); - } - else - fprintf(stdout," fixup %p to %p -> %p\n", R, new_R, L); + /* transactions need to have their pub_to_priv fixed. Otherwise, + they'll think their objects got outdated. Only absolutely + necessary for inevitable transactions (XXX check performance?). 
*/ + dprintf(("start fixup (%p):\n", d)); + G2L_LOOP_FORWARD(d->public_to_private, item) { + gcptr R = item->addr; + gcptr L = item->val; + if (!(R->h_tid & GCFLAG_OLD)) { + /* R was copied over its original */ + gcptr new_R = (gcptr)R->h_original; + g2l_insert(&new_public_to_private, new_R, L); + G2L_LOOP_DELETE(item); + + if (L->h_revision == (revision_t)R) { + L->h_revision = (revision_t)new_R; + dprintf((" fixup %p to %p <-> %p\n", R, new_R, L)); } - } G2L_LOOP_END; - - /* copy to real pub_to_priv */ - G2L_LOOP_FORWARD(new_public_to_private, item) { - g2l_insert(&d->public_to_private, item->addr, item->val); - } G2L_LOOP_END; - g2l_delete_not_used_any_more(&new_public_to_private); - } + else + dprintf((" fixup %p to %p -> %p\n", R, new_R, L)); + } + } G2L_LOOP_END; + /* copy to real pub_to_priv */ + G2L_LOOP_FORWARD(new_public_to_private, item) { + g2l_insert(&d->public_to_private, item->addr, item->val); + } G2L_LOOP_END; + g2l_clear(&new_public_to_private); + + /* now visit them */ G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ @@ -542,6 +544,9 @@ assert(gcptrlist_size(&d->private_from_protected) == d->num_private_from_protected_known_old); } + + if (new_public_to_private.raw_start) + g2l_delete_not_used_any_more(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) @@ -558,12 +563,10 @@ gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - if (!(obj->h_tid & GCFLAG_OLD)) { - obj->h_tid |= GCFLAG_OLD; - items[i] = (gcptr)obj->h_revision; - assert(0); - } - else if (!(obj->h_tid & GCFLAG_VISITED)) { + /* we don't copy private / protected objects over prebuilts (yet) */ + assert(obj->h_tid & GCFLAG_OLD); + + if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ items[i] = items[--d->private_from_protected.size]; } @@ -586,29 +589,24 @@ assert(!(obj->h_tid & GCFLAG_STUB)); if (!(obj->h_tid & GCFLAG_OLD)) { - obj->h_tid |= GCFLAG_OLD; - obj = (gcptr)obj->h_revision; - items[i] = obj; + items[i] = (gcptr)obj->h_revision; } - - - /* Warning: in case the object listed is outdated and has been - replaced with a more recent revision, then it might be the - case that obj->h_revision doesn't have GCFLAG_VISITED, but - just removing it is very wrong --- we want 'd' to abort. - */ - if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* Warning: in case the object listed is outdated and has been + replaced with a more recent revision, then it might be the + case that obj->h_revision doesn't have GCFLAG_VISITED, but + just removing it is very wrong --- we want 'd' to abort. + */ /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; + + /* backup copies will never be candidates for copy over + prebuilts, because there is always the priv-from-prot + object inbetween */ + assert(obj->h_tid & GCFLAG_OLD); } - if (!(obj->h_tid & GCFLAG_OLD)) { - obj->h_tid |= GCFLAG_OLD; - obj = (gcptr)obj->h_revision; - items[i] = obj; - } - revision_t v = obj->h_revision; if (IS_POINTER(v)) { /* has a more recent revision. Oups. 
*/ From noreply at buildbot.pypy.org Thu Jul 11 08:00:56 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 08:00:56 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: typo Message-ID: <20130711060056.1652E1C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r385:844c3aeccf50 Date: 2013-07-11 07:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/844c3aeccf50/ Log: typo diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -406,7 +406,7 @@ obj->h_original = pre_hash; obj->h_tid = old_tid; - dprintf(stdout, "copy %p over prebuilt %p\n", next, obj); + dprintf(("copy %p over prebuilt %p\n", next, obj)); /* Add this copied-over head revision to objects_to_trace because it (next) was added by the preceeding visit() From noreply at buildbot.pypy.org Thu Jul 11 08:00:57 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 08:00:57 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: fix bad cleanup :( Message-ID: <20130711060057.30DF41C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r386:b2090bd31f4d Date: 2013-07-11 08:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/b2090bd31f4d/ Log: fix bad cleanup :( diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -589,7 +589,8 @@ assert(!(obj->h_tid & GCFLAG_STUB)); if (!(obj->h_tid & GCFLAG_OLD)) { - items[i] = (gcptr)obj->h_revision; + obj = (gcptr)obj->h_revision; + items[i] = obj; } else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { /* Warning: in case the object listed is outdated and has been From noreply at buildbot.pypy.org Thu Jul 11 09:01:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 09:01:04 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: We can't leave getarraysize or the immutable getfields Message-ID: <20130711070104.4B3221C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65328:e5fba2018208 Date: 2013-07-11 08:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e5fba2018208/ Log: We can't leave getarraysize or the immutable getfields fully unmodified. We'd need at least some lightweight read barrier to detect stubs. For now we just put a regular read barrier. diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -71,11 +71,17 @@ wants_a_barrier = {} expand_comparison = set() for op in block.operations: + # [1] XXX we can't leave getarraysize or the immutable getfields + # fully unmodified. We'd need at least some lightweight + # read barrier to detect stubs. For now we just put a + # regular read barrier. if (op.opname in ('getfield', 'getarrayitem', - 'getinteriorfield') and + 'getinteriorfield', + 'getarraysize', 'getinteriorarraysize', # XXX [1] + ) and op.result.concretetype is not lltype.Void and op.args[0].concretetype.TO._gckind == 'gc' and - not is_immutable(op)): + True): #not is_immutable(op)): XXX see [1] wants_a_barrier.setdefault(op, 'R') elif (op.opname in ('setfield', 'setarrayitem', 'setinteriorfield') and From noreply at buildbot.pypy.org Thu Jul 11 09:14:23 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 11 Jul 2013 09:14:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Allow using types as a part of a hash key. 
Message-ID: <20130711071423.812CB1C1007@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65329:9e66586a4360 Date: 2013-07-11 17:13 +1000 http://bitbucket.org/pypy/pypy/changeset/9e66586a4360/ Log: Allow using types as a part of a hash key. diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -69,6 +69,7 @@ ('subclassrange_max', Signed), ('rtti', Ptr(RuntimeTypeInfo)), ('name', Ptr(Array(Char))), + ('hash', Signed), ('instantiate', Ptr(FuncType([], OBJECTPTR))), hints = {'immutable': True})) # non-gc case @@ -185,6 +186,7 @@ """Initialize the 'self' portion of the 'vtable' belonging to the given subclass.""" if self.classdef is None: + vtable.hash = hash(rsubcls) # initialize the 'subclassrange_*' and 'name' fields if rsubcls.classdef is not None: #vtable.parenttypeptr = rsubcls.rbase.getvtable() diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -401,6 +401,20 @@ def getlowleveltype(self): return rclass.CLASSTYPE + def get_ll_hash_function(self): + return ll_cls_hash + + get_ll_fasthash_function = get_ll_hash_function + + def get_ll_eq_function(self): + return None + + +def ll_cls_hash(cls): + if not cls: + return 0 + else: + return cls.hash # ____________________________________________________________ diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -519,6 +519,32 @@ res = self.interpret(f, [0]) assert res == 4 + def test_prebuilt_cls_dict(self): + class A(object): + pass + + class B(A): + pass + + d = {(A, 3): 3, (B, 0): 4} + + def f(i): + if i: + cls = A + else: + cls = B + try: + return d[cls, i] + except KeyError: + return -99 + + res = self.interpret(f, [0]) + assert res == 4 + res = self.interpret(f, [3]) + assert res == 3 + res = self.interpret(f, [10]) + assert res == -99 + def test_access_in_try(self): def f(d): try: From noreply at buildbot.pypy.org Thu Jul 11 09:22:22 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Thu, 11 Jul 2013 09:22:22 +0200 (CEST) Subject: [pypy-commit] pypy package-tk: package Tkinter Message-ID: <20130711072222.167251C1007@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: package-tk Changeset: r65330:48475b959b61 Date: 2013-07-10 22:25 +0200 http://bitbucket.org/pypy/pypy/changeset/48475b959b61/ Log: package Tkinter diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -70,6 +70,7 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] From noreply at buildbot.pypy.org Thu Jul 11 09:22:23 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Thu, 11 Jul 2013 09:22:23 +0200 (CEST) Subject: [pypy-commit] pypy package-tk: --without-tk package.py option Message-ID: <20130711072223.598981C1007@cobra.cs.uni-duesseldorf.de> Author: Pawe? 
Piotr Przeradowski Branch: package-tk Changeset: r65331:ad25e15d7a9a Date: 2013-07-11 00:33 +0200 http://bitbucket.org/pypy/pypy/changeset/ad25e15d7a9a/ Log: --without-tk package.py option diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. @@ -46,7 +46,8 @@ os.system("chmod -R g-w %s" % basedir) def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', - copy_to_dir = None, override_pypy_c = None, nostrip=False): + copy_to_dir=None, override_pypy_c=None, nostrip=False, + withouttk=False): basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' @@ -70,7 +71,14 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) - subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. +You can either install Tk development headers package or +add --without-tk option to skip packaging Tk.""" + sys.exit(1) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -113,10 +121,12 @@ shutil.copytree(str(basedir.join('lib-python').join(STDLIB_VER)), str(pypydir.join('lib-python').join(STDLIB_VER)), ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~')) + ignore = ['.svn', 'py', '*.pyc', '*~', '*.c', '*.o'] + if withouttk: + ignore.append('_tkinter') shutil.copytree(str(basedir.join('lib_pypy')), str(pypydir.join('lib_pypy')), - ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~', - '*.c', '*.o')) + ignore=ignore_patterns(*ignore)) for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) headers = includedir.listdir('*.h') + includedir.listdir('*.inl') @@ -184,14 +194,28 @@ print "Ready in %s" % (builddir,) return builddir # for tests + +def print_usage(): + print >>sys.stderr, __doc__ + sys.exit(1) + + if __name__ == '__main__': if len(sys.argv) == 1: - print >>sys.stderr, __doc__ - sys.exit(1) - else: - args = sys.argv[1:] - kw = {} - if args[0] == '--nostrip': + print_usage() + + args = sys.argv[1:] + kw = {} + + for i, arg in enumerate(args): + if arg == '--nostrip': kw['nostrip'] = True - args = args[1:] - package(*args, **kw) + elif arg == '--without-tk': + kw['withouttk'] = True + elif not arg.startswith('--'): + break + else: + print_usage() + + args = args[i:] + package(*args, **kw) From noreply at buildbot.pypy.org Thu Jul 11 09:22:30 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Thu, 11 Jul 2013 09:22:30 +0200 (CEST) Subject: [pypy-commit] pypy package-tk: dont ignore _tkinter Message-ID: <20130711072230.C8AC51C1007@cobra.cs.uni-duesseldorf.de> Author: Pawe? 
Piotr Przeradowski Branch: package-tk Changeset: r65332:68b16b69bdf5 Date: 2013-07-11 00:56 +0200 http://bitbucket.org/pypy/pypy/changeset/68b16b69bdf5/ Log: dont ignore _tkinter diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -77,7 +77,7 @@ except subprocess.CalledProcessError: print >>sys.stderr, """Building Tk bindings failed. You can either install Tk development headers package or -add --without-tk option to skip packaging Tk.""" +add --without-tk option to skip packaging binary CFFI extension.""" sys.exit(1) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' @@ -121,12 +121,10 @@ shutil.copytree(str(basedir.join('lib-python').join(STDLIB_VER)), str(pypydir.join('lib-python').join(STDLIB_VER)), ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~')) - ignore = ['.svn', 'py', '*.pyc', '*~', '*.c', '*.o'] - if withouttk: - ignore.append('_tkinter') shutil.copytree(str(basedir.join('lib_pypy')), str(pypydir.join('lib_pypy')), - ignore=ignore_patterns(*ignore)) + ignore=ignore_patterns('.svn', 'py', '*.pyc', '*~', + '*.c', '*.o')) for file in ['LICENSE', 'README.rst']: shutil.copy(str(basedir.join(file)), str(pypydir)) headers = includedir.listdir('*.h') + includedir.listdir('*.inl') From noreply at buildbot.pypy.org Thu Jul 11 09:22:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 09:22:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in squeaky/pypy-ldflags/package-tk (pull request #159) Message-ID: <20130711072239.35C0D1C1007@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r65333:a1bc5d217c4a Date: 2013-07-11 09:21 +0200 http://bitbucket.org/pypy/pypy/changeset/a1bc5d217c4a/ Log: Merged in squeaky/pypy-ldflags/package-tk (pull request #159) package Tkinter diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. @@ -46,7 +46,8 @@ os.system("chmod -R g-w %s" % basedir) def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', - copy_to_dir = None, override_pypy_c = None, nostrip=False): + copy_to_dir=None, override_pypy_c=None, nostrip=False, + withouttk=False): basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' @@ -70,6 +71,14 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. 
+You can either install Tk development headers package or +add --without-tk option to skip packaging binary CFFI extension.""" + sys.exit(1) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -183,14 +192,28 @@ print "Ready in %s" % (builddir,) return builddir # for tests + +def print_usage(): + print >>sys.stderr, __doc__ + sys.exit(1) + + if __name__ == '__main__': if len(sys.argv) == 1: - print >>sys.stderr, __doc__ - sys.exit(1) - else: - args = sys.argv[1:] - kw = {} - if args[0] == '--nostrip': + print_usage() + + args = sys.argv[1:] + kw = {} + + for i, arg in enumerate(args): + if arg == '--nostrip': kw['nostrip'] = True - args = args[1:] - package(*args, **kw) + elif arg == '--without-tk': + kw['withouttk'] = True + elif not arg.startswith('--'): + break + else: + print_usage() + + args = args[i:] + package(*args, **kw) From noreply at buildbot.pypy.org Thu Jul 11 09:47:37 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 11 Jul 2013 09:47:37 +0200 (CEST) Subject: [pypy-commit] pypy default: add another point to the list Message-ID: <20130711074737.4CE0E1C1007@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65335:a5724d9b779d Date: 2013-07-11 09:46 +0200 http://bitbucket.org/pypy/pypy/changeset/a5724d9b779d/ Log: add another point to the list diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -22,6 +22,8 @@ * Stacklet support on ARM +* Interpreter improvements + * Various numpy improvements * Bugfixes to cffi and ctypes From noreply at buildbot.pypy.org Thu Jul 11 09:47:36 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 11 Jul 2013 09:47:36 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Added tag pypy-2.1-beta for changeset daf1b0412bfb Message-ID: <20130711074736.21A481C02E4@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65334:bed81bc9e5ca Date: 2013-07-11 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/bed81bc9e5ca/ Log: Added tag pypy-2.1-beta for changeset daf1b0412bfb diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,5 @@ 347c6b01847308411f19c06f16ebe8945f55aa84 pypy-2.1-beta 347c6b01847308411f19c06f16ebe8945f55aa84 pypy-2.1-beta a0e2bc9ceccdd7e734d4c881a051320441ea5200 pypy-2.1-beta +a0e2bc9ceccdd7e734d4c881a051320441ea5200 pypy-2.1-beta +daf1b0412bfbd0666c19d567e37b29e4a3be5734 pypy-2.1-beta From noreply at buildbot.pypy.org Thu Jul 11 10:14:05 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 10:14:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: add cache of writeables to demo_random.c and fix a bug Message-ID: <20130711081405.5A4BD1C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r387:9c2b50efb633 Date: 2013-07-11 10:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/9c2b50efb633/ Log: add cache of writeables to demo_random.c and fix a bug diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -52,6 +52,12 @@ // global and per-thread-data time_t default_seed; gcptr shared_roots[SHARED_ROOTS]; + +#define CACHE_MASK 65535 +#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *)) +#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \ + + ((revision_t)(obj) & CACHE_MASK))) + struct thread_data { unsigned int thread_seed; gcptr roots[MAXROOTS]; @@ -61,6 +67,7 @@ int 
steps_left; int interruptible; int atomic; + revision_t writeable[CACHE_ENTRIES]; }; __thread struct thread_data td; @@ -140,6 +147,7 @@ } } +__thread revision_t temp_cache[CACHE_ENTRIES]; void pop_roots() { int i; @@ -148,6 +156,8 @@ td.roots[i] = stm_pop_root(); check(td.roots[i]); } + /* some objects may have changed positions */ + memset(td.writeable, 0, sizeof(td.writeable)); } void del_root(int idx) @@ -227,6 +237,7 @@ if (p != NULL) { check(p); w = stm_write_barrier(p); + CACHE_AT(td.writeable, w) = w; check(w); assert(is_private(w)); } @@ -298,6 +309,8 @@ void setup_thread() { int i; + memset(&td, 0, sizeof(struct thread_data)); + td.thread_seed = default_seed++; td.steps_left = STEPS_PER_THREAD; td.interruptible = 0; @@ -395,7 +408,10 @@ break; case 7: // set 'p' as *next in one of the roots check(_r); - w_r = (nodeptr)write_barrier(_r); + if (CACHE_AT(td.writeable, _r) == _r) + w_r = (nodeptr)_r; + else + w_r = (nodeptr)write_barrier(_r); check((gcptr)w_r); check(p); w_r->next = (struct node*)p; @@ -454,7 +470,10 @@ assert(w_t->id == stm_id((gcptr)_t)); } else { - w_t = (nodeptr)write_barrier(_t); + if (CACHE_AT(td.writeable, _t) == _t) + w_t = (nodeptr)_t; + else + w_t = (nodeptr)write_barrier(_t); w_t->id = stm_id((gcptr)w_t); assert(w_t->id == stm_id((gcptr)_t)); } @@ -470,7 +489,10 @@ assert(w_t->hash == stm_hash((gcptr)_t)); } else { - w_t = (nodeptr)write_barrier(_t); + if (CACHE_AT(td.writeable, _t) == _t) + w_t = (nodeptr)_t; + else + w_t = (nodeptr)write_barrier(_t); w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } @@ -538,6 +560,8 @@ td.interruptible = 0; pop_roots(); + + memset(&td.writeable, 0, sizeof(td.writeable)); } @@ -558,6 +582,7 @@ stm_push_root(end_marker); int p = run_me(); + if (p == -1) // maybe restart transaction return get_rand(3) != 1; @@ -567,6 +592,10 @@ int run_me() { gcptr p = NULL; + + // clear cache of writeables: + memset(&td.writeable, 0, sizeof(td.writeable)); + while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -207,8 +207,8 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); - - /* if old, private or protected, this object needs to be + item = *end; + /* if private or protected, this object needs to be traced again in the next minor_collect if it is currently in old_objects_to_trace. 
Because then it may be seen as write-ready in the view of @@ -216,7 +216,7 @@ pw = write_barrier(); push_root(pw); minor_collect(); pw = pop_root(); // pw still write-ready */ - if (item && item->h_tid & GCFLAG_OLD + if (item && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in obj_to_trace*/ && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -250,6 +250,25 @@ assert getptr(pr, 0) == q assert getptr(pr, 0) != q2 +def test_write_barrier_after_minor_collect_young_to_old(): + p = nalloc_refs(1) + pw = lib.stm_write_barrier(p) + + lib.stm_push_root(pw) + minor_collect() + r = nalloc(HDR) + pw = lib.stm_pop_root() + + check_nursery_free(p) + assert pw.h_tid & GCFLAG_OLD + rawsetptr(pw, 0, r) + + lib.stm_push_root(pw) + minor_collect() + pw = lib.stm_pop_root() + check_nursery_free(r) + + assert getptr(pw, 0) != r def test_id_young_to_old(): # move out of nursery with shadow original From noreply at buildbot.pypy.org Thu Jul 11 10:14:42 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 11 Jul 2013 10:14:42 +0200 (CEST) Subject: [pypy-commit] pypy default: update contributor list Message-ID: <20130711081442.249031C1007@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65336:6108305f9089 Date: 2013-07-11 10:05 +0200 http://bitbucket.org/pypy/pypy/changeset/6108305f9089/ Log: update contributor list diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,179 +35,242 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Amaury Forgeot d'Arc - Antonio Cuni - Samuele Pedroni - Michael Hudson - Holger Krekel - Alex Gaynor - Christian Tismer - Hakan Ardo - Benjamin Peterson - David Schneider - Eric van Riet Paap - Anders Chrigstrom - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Lukas Diekmann - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Sven Hager - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Justin Peel - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Laura Creighton - Adrien Di Mascio - Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone - John Witulski - Timo Paulssen - holger krekel - Dario Bertini - Mark Pearse - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Ilya Osadchiy - Ronny Pfannschmidt - Adrian Kuhn - tav - Georg Brandl - Philip Jenvey - Gerald Klix - Wanja Saatkamp - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Jeff Terrace - Lukas Renggli - Guenter Jantzen - Ned Batchelder - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Artur Lisiecki - Bruno Gola - Ignas Mikalajunas - Stefano Rivera - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz - Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo 
Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick 
Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. 
Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz From noreply at buildbot.pypy.org Thu Jul 11 10:14:43 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 11 Jul 2013 10:14:43 +0200 (CEST) Subject: [pypy-commit] pypy default: updates license information and indentation fixes Message-ID: <20130711081443.C71C51C1007@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65337:99fc5f3e9026 Date: 2013-07-11 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/99fc5f3e9026/ Log: updates license information and indentation fixes diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,242 +35,242 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Alex Gaynor - Michael Hudson - David Schneider - Holger Krekel - Christian Tismer - Hakan Ardo - Benjamin Peterson - Matti Picus - Philip Jenvey - Anders Chrigstrom - Brian Kearns - Eric van Riet Paap - Richard Emslie - Alexander Schremmer - Wim Lavrijsen - Dan Villiom Podlaski Christiansen - Manuel Jacob - Lukas Diekmann - Sven Hager - Anders Lehmann - Aurelien Campeas - Niklaus Haldimann - Ronan Lamy - Camillo Bruni - Laura Creighton - Toon Verwaest - Leonardo Santagada - Seo Sanghyeon - Justin Peel - Ronny Pfannschmidt - David Edelsohn - Anders Hammarquist - Jakub Gustak - Guido Wesdorp - Lawrence Oluyede - Bartosz Skowron - Daniel Roberts - Niko Matsakis - Adrien Di Mascio - Ludovic Aubry - Alexander Hesse - Jacob Hallen - Romain Guillebert - Jason Creighton - Alex Martelli - Michal Bendowski - Jan de Mooij - Michael Foord - Stephan Diehl - Stefan Schwarzer - Valentino Volonghi - Tomek Meka - Patrick Maupin - stian - Bob Ippolito - Bruno Gola - Jean-Paul Calderone - Timo Paulssen - Alexandre Fayolle - Simon Burton - Marius Gedminas - John Witulski - Greg Price - Dario Bertini - Mark Pearse - Simon Cross - Konstantin Lopuhin - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Paul deGrandis - Ilya Osadchiy - Adrian Kuhn - Boris Feigin - tav - Georg Brandl - Bert Freudenberg - Stian Andreassen - Stefano Rivera - Wanja Saatkamp - Gerald Klix - Mike Blume - Taavi Burns - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Preston Timmons - Jeff Terrace - David Ripton - Dusty Phillips - Lukas Renggli - Guenter Jantzen - Tobias Oberstein - Remi Meier - Ned Batchelder - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Jason Chu - Igor Trindade Oliveira - Jeremy Thurgood - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade - Gabriel Lavoie - Olivier Dormond - Jared Grubb - Karl Bartel - Brian Dorsey - Victor Stinner - Stuart Williams - Jasper Schulz - Toby Watson - Antoine Pitrou - Aaron Iles - Michael Cheng - Justas Sadzevicius - Gasper Zejn - Neil Shepperd - Mikael Schönenberg - Elmo Mäntynen - Tobias Pape - Jonathan David Riehl - Stanislaw Halik - Anders Qvist - Chirag Jadwani - Beatrice During - Alex Perry - Vincent Legoll - Alan McIntyre - Alexander Sedov - Corbin Simpson - Christopher Pope - Laurence Tratt - Guillebert Romain - Christian Tismer - Dan Stromberg - Stefano Parmesan - Christian Hudon - Alexis Daboville - Jens-Uwe Mager - Carl Meyer - Karl Ramm - Pieter Zieschang - Gabriel - Paweł Piotr Przeradowski - Andrew Dalke - Sylvain Thenault - Nathan Taylor - Vladimir Kryachko - Jacek Generowicz - Alejandro J. Cura - Jacob Oscarson - Travis Francis Athougies - Kristjan Valur Jonsson - Neil Blakey-Milner - Lutz Paelike - Lucio Torre - Lars Wassermann - Henrik Vendelbo - Dan Buch - Miguel de Val Borro - Artur Lisiecki - Sergey Kishchenko - Ignas Mikalajunas - Christoph Gerum - Martin Blais - Lene Wagner - Tomo Cocoa - Andrews Medina - roberto at goyle - William Leslie - Bobby Impollonia - timo at eistee.fritz.box - Andrew Thompson - Yusei Tahara - Roberto De Ioris - Juan Francisco Cantero Hurtado - Godefroid Chappelle - Joshua Gilbert - Dan Colish - Christopher Armstrong - Michael Hudson-Doyle - Anders Sigfridsson - Yasir Suhail - Floris Bruynooghe - Akira Li - Gustavo Niemeyer - Stephan Busemann - Anna Katrina Dominguez - Christian Muirhead - James Lan - shoma hosaka - Daniel Neuhäuser - Buck Golemon - Konrad Delong - Dinu Gherman - Chris Lambacher - coolbutuseless at gmail.com - Jim Baker - Rodrigo Araújo - Armin Ronacher - Brett Cannon - yrttyr - Zooko Wilcox-O Hearn - Tomer Chachamu - Christopher Groskopf - opassembler.py - Antony Lee - Jim Hunziker - Markus Unterwaditzer - Even Wiik Thomassen - jbs - soareschen - Flavio Percoco - Kristoffer Kleine - yasirs - Michael Chermside - Anna Ravencroft - Andrew Chambers - Julien Phalip - Dan Loewenherz + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts 
+ Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -287,27 +287,26 @@ by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: From noreply at buildbot.pypy.org Thu Jul 11 10:17:33 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 10:17:33 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: merge default Message-ID: <20130711081733.C1DBD1C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r388:dba9439565bd Date: 2013-07-11 10:14 +0200 http://bitbucket.org/pypy/stmgc/changeset/dba9439565bd/ Log: merge default diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -52,6 +52,12 @@ // global and per-thread-data time_t default_seed; gcptr shared_roots[SHARED_ROOTS]; + +#define CACHE_MASK 65535 +#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *)) +#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \ + + ((revision_t)(obj) & CACHE_MASK))) + struct thread_data { unsigned int thread_seed; gcptr roots[MAXROOTS]; @@ -61,6 +67,7 @@ int steps_left; int interruptible; int atomic; + revision_t writeable[CACHE_ENTRIES]; }; __thread struct thread_data td; @@ -140,6 +147,7 @@ } } +__thread revision_t temp_cache[CACHE_ENTRIES]; void pop_roots() { int i; @@ -148,6 +156,8 @@ td.roots[i] = stm_pop_root(); check(td.roots[i]); } + /* some objects may have changed positions */ + memset(td.writeable, 0, sizeof(td.writeable)); } void del_root(int idx) @@ -227,6 +237,7 @@ if (p != NULL) { check(p); w = stm_write_barrier(p); + CACHE_AT(td.writeable, w) = w; check(w); assert(is_private(w)); } @@ -298,6 +309,8 @@ void setup_thread() { int i; + memset(&td, 0, sizeof(struct thread_data)); + td.thread_seed = default_seed++; td.steps_left = STEPS_PER_THREAD; td.interruptible = 0; @@ -395,7 +408,10 @@ break; case 7: // set 'p' as *next in one of the roots check(_r); - w_r = 
(nodeptr)write_barrier(_r); + if (CACHE_AT(td.writeable, _r) == _r) + w_r = (nodeptr)_r; + else + w_r = (nodeptr)write_barrier(_r); check((gcptr)w_r); check(p); w_r->next = (struct node*)p; @@ -454,7 +470,10 @@ assert(w_t->id == stm_id((gcptr)_t)); } else { - w_t = (nodeptr)write_barrier(_t); + if (CACHE_AT(td.writeable, _t) == _t) + w_t = (nodeptr)_t; + else + w_t = (nodeptr)write_barrier(_t); w_t->id = stm_id((gcptr)w_t); assert(w_t->id == stm_id((gcptr)_t)); } @@ -470,7 +489,10 @@ assert(w_t->hash == stm_hash((gcptr)_t)); } else { - w_t = (nodeptr)write_barrier(_t); + if (CACHE_AT(td.writeable, _t) == _t) + w_t = (nodeptr)_t; + else + w_t = (nodeptr)write_barrier(_t); w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } @@ -538,6 +560,8 @@ td.interruptible = 0; pop_roots(); + + memset(&td.writeable, 0, sizeof(td.writeable)); } @@ -558,6 +582,7 @@ stm_push_root(end_marker); int p = run_me(); + if (p == -1) // maybe restart transaction return get_rand(3) != 1; @@ -567,6 +592,10 @@ int run_me() { gcptr p = NULL; + + // clear cache of writeables: + memset(&td.writeable, 0, sizeof(td.writeable)); + while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -207,8 +207,8 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); - - /* if old, private or protected, this object needs to be + item = *end; + /* if private or protected, this object needs to be traced again in the next minor_collect if it is currently in old_objects_to_trace. Because then it may be seen as write-ready in the view of @@ -216,7 +216,7 @@ pw = write_barrier(); push_root(pw); minor_collect(); pw = pop_root(); // pw still write-ready */ - if (item && item->h_tid & GCFLAG_OLD + if (item && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in obj_to_trace*/ && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -250,6 +250,25 @@ assert getptr(pr, 0) == q assert getptr(pr, 0) != q2 +def test_write_barrier_after_minor_collect_young_to_old(): + p = nalloc_refs(1) + pw = lib.stm_write_barrier(p) + + lib.stm_push_root(pw) + minor_collect() + r = nalloc(HDR) + pw = lib.stm_pop_root() + + check_nursery_free(p) + assert pw.h_tid & GCFLAG_OLD + rawsetptr(pw, 0, r) + + lib.stm_push_root(pw) + minor_collect() + pw = lib.stm_pop_root() + check_nursery_free(r) + + assert getptr(pw, 0) != r def test_id_young_to_old(): # move out of nursery with shadow original From noreply at buildbot.pypy.org Thu Jul 11 10:17:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 10:17:57 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Minor updates Message-ID: <20130711081757.3786D1C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4981:710154915480 Date: 2013-07-11 10:17 +0200 http://bitbucket.org/pypy/extradoc/changeset/710154915480/ Log: Minor updates diff --git a/blog/draft/duhton.rst b/blog/draft/duhton.rst --- a/blog/draft/duhton.rst +++ b/blog/draft/duhton.rst @@ -9,10 +9,11 @@ that can be used from any C program with enough effort. Using it is more than a little mundane, since you have to inserts write and read barriers by hand everywhere in your code that reads or writes to garbage collector controlled -memory. Once we finish PyPy integration, this manual work is done automatically +memory. 
In the PyPy integration, this manual work is done automatically by the STM transformation in the interpreter. -However, to experiment some more, we created a `lisp interpreter`_ +However, to experiment some more, we created a minimal +`lisp-like/scheme-like interpreter`_ (called Duhton), that follows closely CPython's implementation strategy. For anyone familiar with CPython's source code, it should be pretty readable. This interpreter works like a normal and very basic lisp variant, From noreply at buildbot.pypy.org Thu Jul 11 10:21:20 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 11 Jul 2013 10:21:20 +0200 (CEST) Subject: [pypy-commit] pypy default: update release info Message-ID: <20130711082120.0C8E91C1007@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65338:d126b027561c Date: 2013-07-11 10:19 +0200 http://bitbucket.org/pypy/pypy/changeset/d126b027561c/ Log: update release info diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -28,6 +28,9 @@ 'pypy-c-jit-linux-x86-64', 'pypy-c-jit-macosx-x86-64', 'pypy-c-jit-win-x86-32', + 'build-pypy-c-jit-linux-armhf-raring', + 'build-pypy-c-jit-linux-armhf-raspbian', + 'build-pypy-c-jit-linux-armel', ] def main(): From noreply at buildbot.pypy.org Thu Jul 11 10:35:16 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 10:35:16 +0200 (CEST) Subject: [pypy-commit] pypy default: add King's College London as license holder Message-ID: <20130711083516.A5B461C0EF6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65339:9ea70ae661e0 Date: 2013-07-11 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/9ea70ae661e0/ Log: add King's College London as license holder diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -282,6 +282,7 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. 
+ King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike From noreply at buildbot.pypy.org Thu Jul 11 11:10:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 11:10:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test '/' Message-ID: <20130711091010.E7A8B1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r389:9a3faf0b3c1b Date: 2013-07-11 11:09 +0200 http://bitbucket.org/pypy/stmgc/changeset/9a3faf0b3c1b/ Log: Test '/' diff --git a/duhton/test/test_int.py b/duhton/test/test_int.py --- a/duhton/test/test_int.py +++ b/duhton/test/test_int.py @@ -20,6 +20,10 @@ assert evaluate("(* 2 3 7)") == 42 assert evaluate("(* (+ 5 1) (+ 6 1))") == 42 +def test_div(): + assert evaluate("(/ 11 2)") == 5 + assert evaluate("(/ 29 2 3)") == 4 + def test_cmp(): assert evaluate("(< 6 6)") == 0 assert evaluate("(<= 6 6)") == 1 From noreply at buildbot.pypy.org Thu Jul 11 11:21:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 11:21:07 +0200 (CEST) Subject: [pypy-commit] stmgc default: untabiffy Message-ID: <20130711092107.DEB721C303E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r390:2bb679834f30 Date: 2013-07-11 11:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/2bb679834f30/ Log: untabiffy diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -199,7 +199,7 @@ DuObject *du_div(DuObject *cons, DuObject *locals) { int result = 0; - int first = 1; + int first = 1; while (cons != Du_None) { _du_read1(cons); @@ -208,12 +208,12 @@ _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); - if (first) { - result = DuInt_AsInt(obj); - first = 0; - } else { - result /= DuInt_AsInt(obj); - } + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result /= DuInt_AsInt(obj); + } _du_restore2(next, locals); cons = next; @@ -612,8 +612,8 @@ void Du_Initialize(int num_threads) { stm_initialize(); - all_threads_count = num_threads; - all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); + all_threads_count = num_threads; + all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); @@ -621,7 +621,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "+", du_add); DuFrame_SetBuiltinMacro(Du_Globals, "-", du_sub); DuFrame_SetBuiltinMacro(Du_Globals, "*", du_mul); - DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); + DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); DuFrame_SetBuiltinMacro(Du_Globals, "<", du_lt); DuFrame_SetBuiltinMacro(Du_Globals, "<=", du_le); DuFrame_SetBuiltinMacro(Du_Globals, "==", du_eq); @@ -642,12 +642,12 @@ DuFrame_SetBuiltinMacro(Du_Globals, "defun", du_defun); DuFrame_SetBuiltinMacro(Du_Globals, "car", du_car); DuFrame_SetBuiltinMacro(Du_Globals, "cdr", du_cdr); - DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); + DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); DuFrame_SetBuiltinMacro(Du_Globals, "not", du_not); DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); - DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); + DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", 
Du_None); } From noreply at buildbot.pypy.org Thu Jul 11 11:26:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 11:26:54 +0200 (CEST) Subject: [pypy-commit] stmgc default: Improve the test: check that pair? really evaluates its argument Message-ID: <20130711092654.5256D1C303E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r391:e354e2de05eb Date: 2013-07-11 11:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/e354e2de05eb/ Log: Improve the test: check that pair? really evaluates its argument diff --git a/duhton/test/test_cons.py b/duhton/test/test_cons.py --- a/duhton/test/test_cons.py +++ b/duhton/test/test_cons.py @@ -10,6 +10,8 @@ def test_pair(): assert run("(print (pair? 1))") == "0\n" assert run("(print (pair? (cons 1 2)))") == "1\n" + assert run("(setq x (cons 1 2)) (print (pair? x))") == "1\n" + assert run("(setq x 42) (print (pair? x))") == "0\n" def test_car_cdr(): assert run("(print (car (quote (2 3))))") == "2\n" From noreply at buildbot.pypy.org Thu Jul 11 11:51:11 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 11:51:11 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: update benchmark nos Message-ID: <20130711095111.D1C641C02E4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4982:c7e44e1cc901 Date: 2013-07-11 11:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/c7e44e1cc901/ Log: update benchmark nos diff --git a/blog/draft/duhton.rst b/blog/draft/duhton.rst --- a/blog/draft/duhton.rst +++ b/blog/draft/duhton.rst @@ -23,7 +23,9 @@ there are no conflicting writes to global memory and hence the demos are very amenable to parallelization. They exercise: -* arithmetics - ``demo/many_sqare_roots.duh`` +* arithmetics - ``demo/many_sqare_roots.duh``:: + + * read-only access to globals - ``demo/trees.duh`` @@ -33,3 +35,30 @@ Duhton can be found in `the stmgc repo`_, while the STM-less Duhton, that uses refcounting, can be found in `the duhton repo`_ under the ``base`` branch. + +Below are some benchmarks. Note that this is a little comparing apples to +oranges since the single-threaded duhton uses refcounting GC vs generational +GC for STM version. Future pypy benchmarks will compare more apples to apples. +Moreover none of the benchmarks has any conflicts. Time is the total time +that the benchmark took (not the CPU time) and there was very little variation +in the consecutive runs (definitely below 5%). + ++-----------+---------------------+----------------+-----------+-----------+ +| benchmark | 1 thread (refcount) | 1 thread (stm) | 2 threads | 4 threads | ++-----------+---------------------+----------------+-----------+-----------+ +| square | 1.9s | 3.5s | 1.8s | 0.9s | ++-----------+---------------------+----------------+-----------+-----------+ +| trees | 0.6s | 1.0s | 0.54s | 0.28s | ++-----------+---------------------+----------------+-----------+-----------+ +| trees2 | 1.4s | 2.2s | 1.1s | 0.57s | ++-----------+---------------------+----------------+-----------+-----------+ + +As you can see, the slowdown for STM vs single thread is significant +(1.8x, 1.7x, 1.6x respectively), but still lower than 2x. However the speedup +from running on multiple threads parallelizes the problem almost perfectly. + +While a significant milestone, we hope the next blog post will cover +STM-enabled pypy that's fully working with JIT work ongoing. 
+ +Cheers, +fijal on behalf of Remi Meier and Armin Rigo From noreply at buildbot.pypy.org Thu Jul 11 12:12:19 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 12:12:19 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original: free objects that are copied over prebuilts in the same major collection Message-ID: <20130711101219.B2F551C02E4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original Changeset: r392:ea61566d33cd Date: 2013-07-11 12:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/ea61566d33cd/ Log: free objects that are copied over prebuilts in the same major collection diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -223,6 +223,7 @@ id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ id_copy->h_tid |= GCFLAG_VISITED; + assert(id_copy->h_tid & GCFLAG_OLD); /* XXX: may not always need tracing? */ if (!(id_copy->h_tid & GCFLAG_STUB)) @@ -249,6 +250,7 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); gcptrlist_insert(&objects_to_trace, obj); keep_original_alive(obj); @@ -272,6 +274,7 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; + assert(prev_obj->h_tid & GCFLAG_OLD); keep_original_alive(prev_obj); assert(*pobj == prev_obj); @@ -315,6 +318,9 @@ obj->h_tid |= GCFLAG_VISITED; B->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); + assert(B->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_STUB)); assert(!(B->h_tid & GCFLAG_STUB)); gcptrlist_insert2(&objects_to_trace, obj, B); @@ -337,6 +343,7 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); gcptrlist_insert(&objects_to_trace, obj); if (IS_POINTER(obj->h_revision)) { @@ -379,20 +386,14 @@ if (IS_POINTER((revision_t)next) /* needs to be an object */ && (next->h_revision & 1) /* needs to be a head rev */ && !(obj->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)) { - - /* XXX: WHY never hit? */ - assert(!(next->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - + + /* next may have had PUBLIC_TO_PRIVATE, but that was + cleared in the preceeding visit() */ assert(next->h_tid & GCFLAG_OLD); /* not moved already */ assert(next->h_original == (revision_t)obj); assert(next->h_tid & GCFLAG_PUBLIC); /* no priv/prot! otherwise we'd need to fix more lists like old_objects_to_trace */ - assert(!(next->h_tid & GCFLAG_STUB)); - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(next->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(next->h_tid & GCFLAG_BACKUP_COPY)); /* copy next over obj but preserve possibly existing pre-hash value and tid (prebuilt-flag) */ @@ -420,8 +421,7 @@ for inevitable transactions and others ignore it during tracing. Otherwise, inev transactions will think 'next' is outdated. */ - next->h_tid &= ~GCFLAG_OLD; - + next->h_tid &= ~(GCFLAG_OLD | GCFLAG_VISITED); } /* obj does not need tracing if it can't be reached from somewhere else */ @@ -571,6 +571,11 @@ items[i] = items[--d->private_from_protected.size]; } } + + /* In old_objects_to_trace there can be a now invalid object + NO: never happens because only priv/prot objects can still + be in old_objects_to_trace after the forced minor_collection. + And we do not copy such objects over prebuilts. 
*/ /* If we're aborting this transaction anyway, we don't need to do * more here. @@ -756,6 +761,7 @@ gcptr p = item->addr; if (p->h_tid & GCFLAG_VISITED) { p->h_tid &= ~GCFLAG_VISITED; + assert(p->h_tid & GCFLAG_OLD); } else { G2L_LOOP_DELETE(item); diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -323,9 +323,6 @@ check_prebuilt(p1) assert lib.stm_hash(p1) == 99 check_free_old(p2) - check_not_free(p3) - # XXX: takes another major collection to free p3 - major_collect() check_free_old(p3) def test_prebuilt_version_to_protected(): From noreply at buildbot.pypy.org Thu Jul 11 13:08:57 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 13:08:57 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: a test and a fix Message-ID: <20130711110857.5AB391C1007@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65340:3b852252d21e Date: 2013-07-11 13:08 +0200 http://bitbucket.org/pypy/pypy/changeset/3b852252d21e/ Log: a test and a fix diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -714,6 +714,8 @@ # that is stored in metainterp.virtualizable_boxes[-1] if self.metainterp.heapcache.is_nonstandard_virtualizable(box): return True + if box is self.metainterp.forced_virtualizable: + self.metainterp.forced_virtualizable = None if (self.metainterp.jitdriver_sd.virtualizable_info is not None or self.metainterp.jitdriver_sd.greenfield_info is not None): standard_box = self.metainterp.virtualizable_boxes[-1] @@ -1613,6 +1615,7 @@ self.portal_trace_positions = [] self.free_frames_list = [] self.last_exc_value_box = None + self.forced_virtualizable = None self.partial_trace = None self.retracing_from = -1 self.call_pure_results = args_dict_box() @@ -2277,6 +2280,8 @@ if vinfo is None: return vbox = self.virtualizable_boxes[-1] + if vbox is self.forced_virtualizable: + return # we already forced it by hand force_token_box = history.BoxPtr() # in case the force_token has not been recorded, record it here # to make sure we know the virtualizable can be broken. 
However, the @@ -2524,6 +2529,11 @@ # ignore the hint on non-standard virtualizable # specifically, ignore it on a virtual return + if self.forced_virtualizable is not None: + # this can happen only in strange cases, but we don't care + # it was already forced + return + self.forced_virtualizable = vbox for i in range(vinfo.num_static_extra_boxes): fieldbox = self.virtualizable_boxes[i] descr = vinfo.static_field_descrs[i] diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1576,7 +1576,11 @@ if bridge is not None: l = [op for op in bridge.operations if op.getopnum() == rop.SETFIELD_GC] - assert len(l) == 2 + assert "'inst_x'" in str(l[0].getdescr().realdescrref()) + assert len(l) == 1 # no vable token + l = [op for op in bridge.operations if + op.getopnum() == rop.GUARD_NOT_FORCED_2] + assert len(l) == 0 class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, From noreply at buildbot.pypy.org Thu Jul 11 13:09:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 11 Jul 2013 13:09:42 +0200 (CEST) Subject: [pypy-commit] pypy flowoperators: Close branch before merging Message-ID: <20130711110942.A41861C02E4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: flowoperators Changeset: r65341:2665996814eb Date: 2013-07-11 13:01 +0200 http://bitbucket.org/pypy/pypy/changeset/2665996814eb/ Log: Close branch before merging From noreply at buildbot.pypy.org Thu Jul 11 13:09:44 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 11 Jul 2013 13:09:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge branch flowoperators Message-ID: <20130711110944.3066C1C02E4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r65342:7ba260941669 Date: 2013-07-11 13:06 +0200 http://bitbucket.org/pypy/pypy/changeset/7ba260941669/ Log: Merge branch flowoperators diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -170,7 +170,6 @@ cmdline="--make-jobs", default=detect_number_of_processors()), # Flags of the TranslationContext: - BoolOption("simplifying", "Simplify flow graphs", default=True), BoolOption("list_comprehension_operations", "When true, look for and special-case the sequence of " "operations that results from a list comprehension and " diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -14,7 +14,7 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) -from rpython.flowspace.operation import implicit_exceptions +from rpython.flowspace.operation import op class FlowingError(Exception): @@ -55,25 +55,44 @@ pass class SpamBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "dead framestate".split() - def __init__(self, framestate): Block.__init__(self, framestate.getvariables()) self.framestate = framestate self.dead = False + def make_recorder(self): + return BlockRecorder(self) + class EggBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "prevblock booloutcome last_exception".split() - def __init__(self, inputargs, prevblock, booloutcome): Block.__init__(self, inputargs) self.prevblock = prevblock 
self.booloutcome = booloutcome + @property + def ancestor(self): + parent = self.prevblock + while isinstance(parent, EggBlock): + parent = parent.prevblock + return parent + + @property + def dead(self): + return self.ancestor.dead + + @property + def framestate(self): + return self.ancestor.framestate + + def make_recorder(self): + recorder = BlockRecorder(self) + curr = self + while isinstance(curr, EggBlock): + prev = curr.prevblock + recorder = Replayer(prev, curr.booloutcome, recorder) + curr = prev + return recorder + def extravars(self, last_exception=None, last_exc_value=None): self.last_exception = last_exception @@ -209,6 +228,7 @@ w_exc_cls, w_exc_value = egg.inputargs[-2:] if isinstance(egg.last_exception, Constant): w_exc_cls = egg.last_exception + assert not isinstance(w_exc_cls.value, list) raise ImplicitOperationError(w_exc_cls, w_exc_value) # ____________________________________________________________ @@ -430,44 +450,23 @@ self.last_instr = state.next_instr self.blockstack = state.blocklist[:] - def recording(self, block): - """ Setup recording of the block and return the recorder. """ - parentblocks = [] - parent = block - while isinstance(parent, EggBlock): - parent = parent.prevblock - parentblocks.append(parent) - # parentblocks = [Egg, Egg, ..., Egg, Spam] not including block - if parent.dead: - raise StopFlowing - self.setstate(parent.framestate) - recorder = BlockRecorder(block) - prevblock = block - for parent in parentblocks: - recorder = Replayer(parent, prevblock.booloutcome, recorder) - prevblock = parent - return recorder + def guessbool(self, w_condition, **kwds): + return self.recorder.guessbool(self, w_condition, **kwds) - def record(self, spaceop): - """Record an operation into the active block""" + def do_operation(self, name, *args_w): recorder = self.recorder if getattr(recorder, 'final_state', None) is not None: self.mergeblock(recorder.crnt_block, recorder.final_state) raise StopFlowing - recorder.append(spaceop) - - def guessbool(self, w_condition, **kwds): - return self.recorder.guessbool(self, w_condition, **kwds) - - def do_operation(self, name, *args_w): spaceop = SpaceOperation(name, args_w, Variable()) spaceop.offset = self.last_instr - self.record(spaceop) + recorder.append(spaceop) return spaceop.result def do_operation_with_implicit_exceptions(self, name, *args_w): w_result = self.do_operation(name, *args_w) - self.handle_implicit_exceptions(implicit_exceptions.get(name)) + oper = getattr(op, name) + self.handle_implicit_exceptions(oper.canraise) return w_result def handle_implicit_exceptions(self, exceptions): @@ -488,39 +487,44 @@ self.pendingblocks = collections.deque([graph.startblock]) while self.pendingblocks: block = self.pendingblocks.popleft() - try: - self.recorder = self.recording(block) - while True: - self.last_instr = self.handle_bytecode(self.last_instr) - self.recorder.final_state = self.getstate() + if not block.dead: + self.record_block(block) - except ImplicitOperationError, e: - if isinstance(e.w_type, Constant): - exc_cls = e.w_type.value - else: - exc_cls = Exception - msg = "implicit %s shouldn't occur" % exc_cls.__name__ - w_type = Constant(AssertionError) - w_value = Constant(AssertionError(msg)) - link = Link([w_type, w_value], graph.exceptblock) - self.recorder.crnt_block.closeblock(link) + def record_block(self, block): + self.setstate(block.framestate) + self.recorder = block.make_recorder() + try: + while True: + self.last_instr = self.handle_bytecode(self.last_instr) + self.recorder.final_state = 
self.getstate() - except FSException, e: - if e.w_type is self.space.w_ImportError: - msg = 'import statement always raises %s' % e - raise ImportError(msg) - link = Link([e.w_type, e.w_value], graph.exceptblock) - self.recorder.crnt_block.closeblock(link) + except ImplicitOperationError, e: + if isinstance(e.w_type, Constant): + exc_cls = e.w_type.value + else: + exc_cls = Exception + msg = "implicit %s shouldn't occur" % exc_cls.__name__ + w_type = Constant(AssertionError) + w_value = Constant(AssertionError(msg)) + link = Link([w_type, w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except StopFlowing: - pass + except FSException, e: + if e.w_type == self.space.w_ImportError: + msg = 'import statement always raises %s' % e + raise ImportError(msg) + link = Link([e.w_type, e.w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except Return as exc: - w_result = exc.value - link = Link([w_result], graph.returnblock) - self.recorder.crnt_block.closeblock(link) + except StopFlowing: + pass - del self.recorder + except Return as exc: + w_result = exc.value + link = Link([w_result], self.graph.returnblock) + self.recorder.crnt_block.closeblock(link) + + self.recorder = None def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr @@ -661,8 +665,8 @@ self.last_exception = operr raise operr else: - raise FSException(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) + raise space.exc_wrap(TypeError( + "raise: no active exception to re-raise")) w_value = space.w_None if nbargs >= 3: diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -3,6 +3,7 @@ # # the below object/attribute model evolved from # a discussion in Berlin, 4th of october 2003 +import types import py from rpython.tool.uid import uid, Hashable @@ -261,6 +262,7 @@ dummyname = 'v' namesdict = {dummyname : (dummyname, 0)} + @property def name(self): _name = self._name _nr = self._nr @@ -270,11 +272,10 @@ _nr = self._nr = nd[_name][1] nd[_name] = (_name, _nr + 1) return "%s%d" % (_name, _nr) - name = property(name) + @property def renamed(self): return self._name is not self.dummyname - renamed = property(renamed) def __init__(self, name=None): self._name = self.dummyname @@ -314,6 +315,9 @@ self._name = intern(name) self._nr = nr + def foldable(self): + return False + class Constant(Hashable): __slots__ = ["concretetype"] @@ -323,6 +327,25 @@ if concretetype is not None: self.concretetype = concretetype + def foldable(self): + to_check = self.value + if hasattr(to_check, 'im_self'): + to_check = to_check.im_self + if isinstance(to_check, (type, types.ClassType, types.ModuleType)): + # classes/types/modules are assumed immutable + return True + if (hasattr(to_check, '__class__') and + to_check.__class__.__module__ == '__builtin__'): + # builtin object + return True + # User-created instance + if hasattr(to_check, '_freeze_'): + assert to_check._freeze_() is True + return True + else: + # cannot count on it not mutating at runtime! 
+ return False + class UnwrapException(Exception): """Attempted to unwrap a Variable.""" diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -19,7 +19,7 @@ from rpython.flowspace.pygraph import PyGraph from rpython.flowspace.specialcase import SPECIAL_CASES from rpython.rlib.unroll import unrolling_iterable, _unroller -from rpython.rlib import rstackovf, rarithmetic +from rpython.rlib import rstackovf from rpython.rlib.rarithmetic import is_valid_int @@ -45,6 +45,16 @@ } } +# built-ins that can always raise exceptions +builtins_exceptions = { + int: [ValueError], + float: [ValueError], + chr: [ValueError], + unichr: [ValueError], + unicode: [UnicodeDecodeError], +} + + def _assert_rpythonic(func): """Raise ValueError if ``func`` is obviously not RPython""" if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): @@ -135,6 +145,11 @@ raise WrapException return Constant(obj) + def exc_wrap(self, exc): + w_value = self.wrap(exc) + w_type = self.wrap(type(exc)) + return FSException(w_type, w_value) + def int_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -143,15 +158,6 @@ return val return self.unwrap(w_obj) - def uint_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not rarithmetic.r_uint: - raise TypeError("expected unsigned: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - - def str_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -160,14 +166,6 @@ return val return self.unwrap(w_obj) - def float_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not float: - raise TypeError("expected float: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - def unwrap(self, w_obj): if isinstance(w_obj, Variable): raise UnwrapException @@ -176,40 +174,9 @@ else: raise TypeError("not wrapped: " + repr(w_obj)) - def unwrap_for_computation(self, w_obj): - obj = self.unwrap(w_obj) - to_check = obj - if hasattr(to_check, 'im_self'): - to_check = to_check.im_self - if (not isinstance(to_check, (type, types.ClassType, types.ModuleType)) and - # classes/types/modules are assumed immutable - hasattr(to_check, '__class__') and to_check.__class__.__module__ != '__builtin__'): - frozen = hasattr(to_check, '_freeze_') - if frozen: - assert to_check._freeze_() is True - else: - # cannot count on it not mutating at runtime! - raise UnwrapException - return obj - def exception_issubclass_w(self, w_cls1, w_cls2): return self.is_true(self.issubtype(w_cls1, w_cls2)) - def _exception_match(self, w_exc_type, w_check_class): - """Helper for exception_match - - Handles the base case where w_check_class is a constant exception - type. 
- """ - if self.is_w(w_exc_type, w_check_class): - return True # fast path (also here to handle string exceptions) - try: - return self.exception_issubclass_w(w_exc_type, w_check_class) - except FSException, e: - if e.match(self, self.w_TypeError): # string exceptions maybe - return False - raise - def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" try: @@ -221,47 +188,43 @@ "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case - return self._exception_match(w_exc_type, w_check_class) + return self.exception_issubclass_w(w_exc_type, w_check_class) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: w_real_class = self.wrap(rstackovf._StackOverflow) - return self._exception_match(w_exc_type, w_real_class) + return self.exception_issubclass_w(w_exc_type, w_real_class) # checking a tuple of classes for w_klass in self.unpackiterable(w_check_class): if self.exception_match(w_exc_type, w_klass): return True return False - def exc_from_raise(self, w_type, w_value): + def exc_from_raise(self, w_arg1, w_arg2): """ Create a wrapped exception from the arguments of a raise statement. Returns an FSException object whose w_value is an instance of w_type. """ - if self.isinstance_w(w_type, self.w_type): + if self.isinstance_w(w_arg1, self.w_type): # this is for all cases of the form (Class, something) - if self.is_w(w_value, self.w_None): + if self.is_w(w_arg2, self.w_None): # raise Type: we assume we have to instantiate Type - w_value = self.call_function(w_type) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1) else: - w_valuetype = self.type(w_value) - if self.exception_issubclass_w(w_valuetype, w_type): + w_valuetype = self.type(w_arg2) + if self.exception_issubclass_w(w_valuetype, w_arg1): # raise Type, Instance: let etype be the exact type of value - w_type = w_valuetype + w_value = w_arg2 else: # raise Type, X: assume X is the constructor argument - w_value = self.call_function(w_type, w_value) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1, w_arg2) else: # the only case left here is (inst, None), from a 'raise inst'. 
- w_inst = w_type - w_instclass = self.type(w_inst) - if not self.is_w(w_value, self.w_None): - raise FSException(self.w_TypeError, self.wrap( + if not self.is_w(w_arg2, self.w_None): + raise self.exc_wrap(TypeError( "instance exception may not have a separate value")) - w_value = w_inst - w_type = w_instclass + w_value = w_arg1 + w_type = self.type(w_value) return FSException(w_type, w_value) def unpackiterable(self, w_iterable): @@ -291,12 +254,8 @@ return self.wrap(not self.is_true(w_obj)) def is_true(self, w_obj): - try: - obj = self.unwrap_for_computation(w_obj) - except UnwrapException: - pass - else: - return bool(obj) + if w_obj.foldable(): + return bool(w_obj.value) w_truthvalue = self.frame.do_operation('is_true', w_obj) return self.frame.guessbool(w_truthvalue) @@ -316,7 +275,7 @@ try: v, next_unroller = it.step() except IndexError: - raise FSException(self.w_StopIteration, self.w_None) + raise self.exc_wrap(StopIteration()) else: frame.replace_in_stack(it, next_unroller) return self.wrap(v) @@ -324,16 +283,6 @@ frame.handle_implicit_exceptions([StopIteration, RuntimeError]) return w_item - def setitem(self, w_obj, w_key, w_val): - # protect us from globals write access - if w_obj is self.frame.w_globals: - raise FlowingError(self.frame, - "Attempting to modify global variable %r." % (w_key)) - return self.frame.do_operation_with_implicit_exceptions('setitem', - w_obj, w_key, w_val) - - def setitem_str(self, w_obj, key, w_value): - return self.setitem(w_obj, self.wrap(key), w_value) def getattr(self, w_obj, w_name): # handling special things like sys @@ -343,12 +292,8 @@ if w_name not in const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', w_obj, w_name) - try: - obj = self.unwrap_for_computation(w_obj) - name = self.unwrap_for_computation(w_name) - except UnwrapException: - pass - else: + if w_obj.foldable() and w_name.foldable(): + obj, name = w_obj.value, w_name.value try: result = getattr(obj, name) except Exception, e: @@ -369,8 +314,8 @@ def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: mod = __import__(name, glob, loc, frm, level) - except ImportError, e: - raise FSException(self.w_ImportError, self.wrap(str(e))) + except ImportError as e: + raise self.exc_wrap(e) return self.wrap(mod) def import_from(self, w_module, w_name): @@ -378,15 +323,15 @@ assert isinstance(w_name, Constant) # handle sys if w_module in self.not_really_const: - const_w = self.not_really_const[w_obj] + const_w = self.not_really_const[w_module] if w_name not in const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', - w_obj, w_name) + w_module, w_name) try: return self.wrap(getattr(w_module.value, w_name.value)) except AttributeError: - raise FSException(self.w_ImportError, - self.wrap("cannot import name '%s'" % w_name.value)) + raise self.exc_wrap(ImportError( + "cannot import name '%s'" % w_name.value)) def call_method(self, w_obj, methname, *arg_w): w_meth = self.getattr(w_obj, self.wrap(methname)) @@ -417,7 +362,7 @@ args_w = args.arguments_w + self.unpackiterable(args.w_stararg) else: args_w = args.arguments_w - return sc(self, fn, args_w) + return sc(self, args_w) if args.keywords or isinstance(args.w_stararg, Variable): shape, args_w = args.flatten() @@ -430,15 +375,6 @@ args_w = args.arguments_w w_res = self.frame.do_operation('simple_call', w_callable, *args_w) - # maybe the call has generated an exception (any one) - # but, let's say, not if we are calling a built-in class or function - # because this gets in the way of 
the special-casing of - # - # raise SomeError(x) - # - # as shown by test_objspace.test_raise3. - - exceptions = [Exception] # *any* exception by default if isinstance(w_callable, Constant): c = w_callable.value if (isinstance(c, (types.BuiltinFunctionType, @@ -446,8 +382,11 @@ types.ClassType, types.TypeType)) and c.__module__ in ['__builtin__', 'exceptions']): - exceptions = operation.implicit_exceptions.get(c) - self.frame.handle_implicit_exceptions(exceptions) + if c in builtins_exceptions: + self.frame.handle_implicit_exceptions(builtins_exceptions[c]) + return w_res + # *any* exception for non-builtins + self.frame.handle_implicit_exceptions([Exception]) return w_res def find_global(self, w_globals, varname): @@ -462,82 +401,61 @@ raise FlowingError(self.frame, self.wrap(message)) return self.wrap(value) -def make_op(name, arity): +def make_impure_op(oper): + def generic_operator(self, *args_w): + if len(args_w) != oper.arity: + raise TypeError(oper.name + " got the wrong number of arguments") + w_result = self.frame.do_operation_with_implicit_exceptions(oper.name, *args_w) + return w_result + return generic_operator + +def make_op(oper): """Add function operation to the flow space.""" - if getattr(FlowObjSpace, name, None) is not None: - return - - op = None - skip = False - arithmetic = False - - if (name.startswith('del') or - name.startswith('set') or - name.startswith('inplace_')): - # skip potential mutators - skip = True - elif name in ('id', 'hash', 'iter', 'userdel'): - # skip potential runtime context dependecies - skip = True - elif name in ('repr', 'str'): - rep = getattr(__builtin__, name) - def op(obj): - s = rep(obj) - if "at 0x" in s: - print >>sys.stderr, "Warning: captured address may be awkward" - return s - else: - op = operation.FunctionByName[name] - arithmetic = (name + '_ovf') in operation.FunctionByName - - if not op and not skip: - raise ValueError("XXX missing operator: %s" % (name,)) + name = oper.name + func = oper.pyfunc def generic_operator(self, *args_w): - assert len(args_w) == arity, name + " got the wrong number of arguments" - if op: - args = [] - for w_arg in args_w: - try: - arg = self.unwrap_for_computation(w_arg) - except UnwrapException: - break + assert len(args_w) == oper.arity, name + " got the wrong number of arguments" + args = [] + if all(w_arg.foldable() for w_arg in args_w): + args = [w_arg.value for w_arg in args_w] + # All arguments are constants: call the operator now + try: + result = func(*args) + except Exception, e: + etype = e.__class__ + msg = "%s%r always raises %s: %s" % ( + name, tuple(args), etype, e) + raise FlowingError(self.frame, msg) + else: + # don't try to constant-fold operations giving a 'long' + # result. The result is probably meant to be sent to + # an intmask(), but the 'long' constant confuses the + # annotator a lot. + if oper.can_overflow and type(result) is long: + pass + # don't constant-fold getslice on lists, either + elif name == 'getslice' and type(result) is list: + pass + # otherwise, fine else: - args.append(arg) - else: - # All arguments are constants: call the operator now - try: - result = op(*args) - except Exception, e: - etype = e.__class__ - msg = "%s%r always raises %s: %s" % ( - name, tuple(args), etype, e) - raise FlowingError(self.frame, msg) - else: - # don't try to constant-fold operations giving a 'long' - # result. The result is probably meant to be sent to - # an intmask(), but the 'long' constant confuses the - # annotator a lot. 
- if arithmetic and type(result) is long: + try: + return self.wrap(result) + except WrapException: + # type cannot sanely appear in flow graph, + # store operation with variable result instead pass - # don't constant-fold getslice on lists, either - elif name == 'getslice' and type(result) is list: - pass - # otherwise, fine - else: - try: - return self.wrap(result) - except WrapException: - # type cannot sanely appear in flow graph, - # store operation with variable result instead - pass w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) return w_result + return generic_operator - setattr(FlowObjSpace, name, generic_operator) - -for (name, symbol, arity, specialnames) in operation.MethodTable: - make_op(name, arity) +for oper in operation.op.__dict__.values(): + if getattr(FlowObjSpace, oper.name, None) is None: + if oper.pure: + op_method = make_op(oper) + else: + op_method = make_impure_op(oper) + setattr(FlowObjSpace, oper.name, op_method) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,93 +8,53 @@ import operator from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck +from rpython.flowspace.model import Constant -# this is a copy that should be shared with standard objspace +class _OpHolder(object): pass +op = _OpHolder() -MethodTable = [ -# method name # symbol # number of arguments # special method name(s) - ('is_', 'is', 2, []), - ('id', 'id', 1, []), - ('type', 'type', 1, []), - ('isinstance', 'isinstance', 2, ['__instancecheck__']), - ('issubtype', 'issubtype', 2, ['__subclasscheck__']), # not for old-style classes - ('repr', 'repr', 1, ['__repr__']), - ('str', 'str', 1, ['__str__']), - ('format', 'format', 2, ['__format__']), - ('len', 'len', 1, ['__len__']), - ('hash', 'hash', 1, ['__hash__']), - ('getattr', 'getattr', 2, ['__getattribute__']), - ('setattr', 'setattr', 3, ['__setattr__']), - ('delattr', 'delattr', 2, ['__delattr__']), - ('getitem', 'getitem', 2, ['__getitem__']), - ('setitem', 'setitem', 3, ['__setitem__']), - ('delitem', 'delitem', 2, ['__delitem__']), - ('getslice', 'getslice', 3, ['__getslice__']), - ('setslice', 'setslice', 4, ['__setslice__']), - ('delslice', 'delslice', 3, ['__delslice__']), - ('trunc', 'trunc', 1, ['__trunc__']), - ('pos', 'pos', 1, ['__pos__']), - ('neg', 'neg', 1, ['__neg__']), - ('nonzero', 'truth', 1, ['__nonzero__']), - ('abs' , 'abs', 1, ['__abs__']), - ('hex', 'hex', 1, ['__hex__']), - ('oct', 'oct', 1, ['__oct__']), - ('ord', 'ord', 1, []), - ('invert', '~', 1, ['__invert__']), - ('add', '+', 2, ['__add__', '__radd__']), - ('sub', '-', 2, ['__sub__', '__rsub__']), - ('mul', '*', 2, ['__mul__', '__rmul__']), - ('truediv', '/', 2, ['__truediv__', '__rtruediv__']), - ('floordiv', '//', 2, ['__floordiv__', '__rfloordiv__']), - ('div', 'div', 2, ['__div__', '__rdiv__']), - ('mod', '%', 2, ['__mod__', '__rmod__']), - ('divmod', 'divmod', 2, ['__divmod__', '__rdivmod__']), - ('pow', '**', 3, ['__pow__', '__rpow__']), - ('lshift', '<<', 2, ['__lshift__', '__rlshift__']), - ('rshift', '>>', 2, ['__rshift__', '__rrshift__']), - ('and_', '&', 2, ['__and__', '__rand__']), - ('or_', '|', 2, ['__or__', '__ror__']), - ('xor', '^', 2, ['__xor__', '__rxor__']), - ('int', 'int', 1, ['__int__']), - ('index', 'index', 1, ['__index__']), - ('float', 'float', 1, ['__float__']), - ('long', 'long', 1, ['__long__']), - ('inplace_add', '+=', 2, 
['__iadd__']), - ('inplace_sub', '-=', 2, ['__isub__']), - ('inplace_mul', '*=', 2, ['__imul__']), - ('inplace_truediv', '/=', 2, ['__itruediv__']), - ('inplace_floordiv','//=', 2, ['__ifloordiv__']), - ('inplace_div', 'div=', 2, ['__idiv__']), - ('inplace_mod', '%=', 2, ['__imod__']), - ('inplace_pow', '**=', 2, ['__ipow__']), - ('inplace_lshift', '<<=', 2, ['__ilshift__']), - ('inplace_rshift', '>>=', 2, ['__irshift__']), - ('inplace_and', '&=', 2, ['__iand__']), - ('inplace_or', '|=', 2, ['__ior__']), - ('inplace_xor', '^=', 2, ['__ixor__']), - ('lt', '<', 2, ['__lt__', '__gt__']), - ('le', '<=', 2, ['__le__', '__ge__']), - ('eq', '==', 2, ['__eq__', '__eq__']), - ('ne', '!=', 2, ['__ne__', '__ne__']), - ('gt', '>', 2, ['__gt__', '__lt__']), - ('ge', '>=', 2, ['__ge__', '__le__']), - ('cmp', 'cmp', 2, ['__cmp__']), # rich cmps preferred - ('coerce', 'coerce', 2, ['__coerce__', '__coerce__']), - ('contains', 'contains', 2, ['__contains__']), - ('iter', 'iter', 1, ['__iter__']), - ('next', 'next', 1, ['next']), -# ('call', 'call', 3, ['__call__']), - ('get', 'get', 3, ['__get__']), - ('set', 'set', 3, ['__set__']), - ('delete', 'delete', 2, ['__delete__']), - ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py - ] +func2op = {} +class SpaceOperator(object): + def __init__(self, name, arity, symbol, pyfunc, pure=False, + can_overflow=False): + self.name = name + self.arity = arity + self.symbol = symbol + self.pyfunc = pyfunc + self.pure = pure + self.can_overflow = can_overflow + self.canraise = [] -FunctionByName = {} # dict {"operation_name": } -OperationName = {} # dict {: "operation_name"} -Arity = {} # dict {"operation name": number of arguments} + def make_sc(self): + def sc_operator(space, args_w): + if len(args_w) != self.arity: + if self is op.pow and len(args_w) == 2: + args_w = args_w + [Constant(None)] + elif self is op.getattr and len(args_w) == 3: + return space.frame.do_operation('simple_call', Constant(getattr), *args_w) + else: + raise Exception("should call %r with exactly %d arguments" % ( + self.name, self.arity)) + # completely replace the call with the underlying + # operation and its limited implicit exceptions semantic + return getattr(space, self.name)(*args_w) + return sc_operator + + +def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): + operator_func = getattr(operator, name, None) + oper = SpaceOperator(name, arity, symbol, pyfunc, pure, can_overflow=ovf) + setattr(op, name, oper) + if pyfunc is not None: + func2op[pyfunc] = oper + if operator_func: + func2op[operator_func] = oper + if pyfunc is None: + oper.pyfunc = operator_func + if ovf: + ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) + add_operator(name + '_ovf', arity, symbol, pyfunc=ovf_func) # ____________________________________________________________ @@ -186,33 +146,6 @@ def userdel(x): x.__del__() -def neg_ovf(x): - return ovfcheck(-x) - -def abs_ovf(x): - return ovfcheck(abs(x)) - -def add_ovf(x, y): - return ovfcheck(x + y) - -def sub_ovf(x, y): - return ovfcheck(x - y) - -def mul_ovf(x, y): - return ovfcheck(x * y) - -def floordiv_ovf(x, y): - return ovfcheck(operator.floordiv(x, y)) - -def div_ovf(x, y): - return ovfcheck(operator.div(x, y)) - -def mod_ovf(x, y): - return ovfcheck(x % y) - -def lshift_ovf(x, y): - return ovfcheck(x << y) - # slicing: operator.{get,set,del}slice() don't support b=None or c=None def do_getslice(a, b, c): return a[b:c] @@ -226,101 +159,91 @@ def unsupported(*args): raise ValueError("this is 
not supported") -# ____________________________________________________________ -# The following table can list several times the same operation name, -# if multiple built-in functions correspond to it. The first one should -# be picked, though, as the best built-in for the given operation name. -# Lines ('name', operator.name) are added automatically. +add_operator('is_', 2, 'is', pure=True) +add_operator('id', 1, 'id', pyfunc=id) +add_operator('type', 1, 'type', pyfunc=new_style_type, pure=True) +add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance, pure=True) +add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('repr', 1, 'repr', pyfunc=repr, pure=True) +add_operator('str', 1, 'str', pyfunc=str, pure=True) +add_operator('format', 2, 'format', pyfunc=unsupported) +add_operator('len', 1, 'len', pyfunc=len, pure=True) +add_operator('hash', 1, 'hash', pyfunc=hash) +add_operator('getattr', 2, 'getattr', pyfunc=getattr, pure=True) +add_operator('setattr', 3, 'setattr', pyfunc=setattr) +add_operator('delattr', 2, 'delattr', pyfunc=delattr) +add_operator('getitem', 2, 'getitem', pure=True) +add_operator('setitem', 3, 'setitem') +add_operator('delitem', 2, 'delitem') +add_operator('getslice', 3, 'getslice', pyfunc=do_getslice, pure=True) +add_operator('setslice', 4, 'setslice', pyfunc=do_setslice) +add_operator('delslice', 3, 'delslice', pyfunc=do_delslice) +add_operator('trunc', 1, 'trunc', pyfunc=unsupported) +add_operator('pos', 1, 'pos', pure=True) +add_operator('neg', 1, 'neg', pure=True, ovf=True) +add_operator('nonzero', 1, 'truth', pyfunc=bool, pure=True) +op.is_true = op.nonzero +add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True, ovf=True) +add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) +add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) +add_operator('ord', 1, 'ord', pyfunc=ord, pure=True) +add_operator('invert', 1, '~', pure=True) +add_operator('add', 2, '+', pure=True, ovf=True) +add_operator('sub', 2, '-', pure=True, ovf=True) +add_operator('mul', 2, '*', pure=True, ovf=True) +add_operator('truediv', 2, '/', pure=True) +add_operator('floordiv', 2, '//', pure=True, ovf=True) +add_operator('div', 2, 'div', pure=True, ovf=True) +add_operator('mod', 2, '%', pure=True, ovf=True) +add_operator('divmod', 2, 'divmod', pyfunc=divmod, pure=True) +add_operator('pow', 3, '**', pyfunc=pow, pure=True) +add_operator('lshift', 2, '<<', pure=True, ovf=True) +add_operator('rshift', 2, '>>', pure=True) +add_operator('and_', 2, '&', pure=True) +add_operator('or_', 2, '|', pure=True) +add_operator('xor', 2, '^', pure=True) +add_operator('int', 1, 'int', pyfunc=do_int, pure=True) +add_operator('index', 1, 'index', pyfunc=do_index, pure=True) +add_operator('float', 1, 'float', pyfunc=do_float, pure=True) +add_operator('long', 1, 'long', pyfunc=do_long, pure=True) +add_operator('inplace_add', 2, '+=', pyfunc=inplace_add) +add_operator('inplace_sub', 2, '-=', pyfunc=inplace_sub) +add_operator('inplace_mul', 2, '*=', pyfunc=inplace_mul) +add_operator('inplace_truediv', 2, '/=', pyfunc=inplace_truediv) +add_operator('inplace_floordiv', 2, '//=', pyfunc=inplace_floordiv) +add_operator('inplace_div', 2, 'div=', pyfunc=inplace_div) +add_operator('inplace_mod', 2, '%=', pyfunc=inplace_mod) +add_operator('inplace_pow', 2, '**=', pyfunc=inplace_pow) +add_operator('inplace_lshift', 2, '<<=', pyfunc=inplace_lshift) +add_operator('inplace_rshift', 2, '>>=', pyfunc=inplace_rshift) +add_operator('inplace_and', 2, '&=', pyfunc=inplace_and) 
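A quick illustration of the registry built by the add_operator() calls above (sketch only, not part of the patch; it assumes the patched rpython tree is importable):

    import operator
    from rpython.flowspace.operation import op, func2op

    assert op.add.arity == 2 and op.add.symbol == '+'
    assert op.add.pure                        # candidate for constant-folding
    assert op.add_ovf is not None             # generated because ovf=True
    assert func2op[operator.add] is op.add    # builtin mapped back to its operator
    print op.add.pyfunc(20, 22)               # what FunctionByName['add'](20, 22) used to give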
+add_operator('inplace_or', 2, '|=', pyfunc=inplace_or) +add_operator('inplace_xor', 2, '^=', pyfunc=inplace_xor) +add_operator('lt', 2, '<', pure=True) +add_operator('le', 2, '<=', pure=True) +add_operator('eq', 2, '==', pure=True) +add_operator('ne', 2, '!=', pure=True) +add_operator('gt', 2, '>', pure=True) +add_operator('ge', 2, '>=', pure=True) +add_operator('cmp', 2, 'cmp', pyfunc=cmp, pure=True) # rich cmps preferred +add_operator('coerce', 2, 'coerce', pyfunc=coerce, pure=True) +add_operator('contains', 2, 'contains', pure=True) +add_operator('iter', 1, 'iter', pyfunc=iter) +add_operator('next', 1, 'next', pyfunc=next) +#add_operator('call', 3, 'call') +add_operator('get', 3, 'get', pyfunc=get, pure=True) +add_operator('set', 3, 'set', pyfunc=set) +add_operator('delete', 2, 'delete', pyfunc=delete) +add_operator('userdel', 1, 'del', pyfunc=userdel) +add_operator('buffer', 1, 'buffer', pyfunc=buffer, pure=True) # see buffer.py -# INTERNAL ONLY, use the dicts declared at the top of the file. -Table = [ - ('id', id), - ('type', new_style_type), - ('type', type), - ('isinstance', isinstance), - ('issubtype', issubclass), - ('repr', repr), - ('str', str), - ('format', unsupported), - ('len', len), - ('hash', hash), - ('getattr', getattr), - ('setattr', setattr), - ('delattr', delattr), - ('nonzero', bool), - ('nonzero', operator.truth), - ('is_true', bool), - ('is_true', operator.truth), - ('trunc', unsupported), - ('abs' , abs), - ('hex', hex), - ('oct', oct), - ('ord', ord), - ('divmod', divmod), - ('pow', pow), - ('int', do_int), - ('index', do_index), - ('float', do_float), - ('long', do_long), - ('inplace_add', inplace_add), - ('inplace_sub', inplace_sub), - ('inplace_mul', inplace_mul), - ('inplace_truediv', inplace_truediv), - ('inplace_floordiv',inplace_floordiv), - ('inplace_div', inplace_div), - ('inplace_mod', inplace_mod), - ('inplace_pow', inplace_pow), - ('inplace_lshift', inplace_lshift), - ('inplace_rshift', inplace_rshift), - ('inplace_and', inplace_and), - ('inplace_or', inplace_or), - ('inplace_xor', inplace_xor), - ('cmp', cmp), - ('coerce', coerce), - ('iter', iter), - ('next', next), - ('get', get), - ('set', set), - ('delete', delete), - ('userdel', userdel), - ('buffer', buffer), - ('getslice', do_getslice), - ('setslice', do_setslice), - ('delslice', do_delslice), - # --- operations added by graph transformations --- - ('neg_ovf', neg_ovf), - ('abs_ovf', abs_ovf), - ('add_ovf', add_ovf), - ('sub_ovf', sub_ovf), - ('mul_ovf', mul_ovf), - ('floordiv_ovf', floordiv_ovf), - ('div_ovf', div_ovf), - ('mod_ovf', mod_ovf), - ('lshift_ovf', lshift_ovf), -] +# Other functions that get directly translated to SpaceOperators +func2op[type] = op.type +func2op[operator.truth] = op.nonzero if hasattr(__builtin__, 'next'): - Table.append(('next', __builtin__.next)) + func2op[__builtin__.next] = op.next -def setup(): - # insert all operators - for line in MethodTable: - name = line[0] - if hasattr(operator, name): - Table.append((name, getattr(operator, name))) - # build the dictionaries - for name, func in Table: - if name not in FunctionByName: - FunctionByName[name] = func - if func not in OperationName: - OperationName[func] = name - # check that the result is complete - for line in MethodTable: - name = line[0] - Arity[name] = line[2] - assert name in FunctionByName -setup() -del Table, setup # INTERNAL ONLY, use the dicts declared at the top of the file op_appendices = { OverflowError: 'ovf', @@ -330,24 +253,18 @@ ValueError: 'val', } -implicit_exceptions = { - int: 
[ValueError], # built-ins that can always raise exceptions - float: [ValueError], - chr: [ValueError], - unichr: [ValueError], - unicode: [UnicodeDecodeError], - # specifying IndexError, and KeyError beyond Exception, - # allows the annotator to be more precise, see test_reraiseAnything/KeyError in - # the annotator tests - 'getitem': [IndexError, KeyError, Exception], - 'setitem': [IndexError, KeyError, Exception], - 'delitem': [IndexError, KeyError, Exception], - 'contains': [Exception], # from an r_dict - } +# specifying IndexError, and KeyError beyond Exception, +# allows the annotator to be more precise, see test_reraiseAnything/KeyError in +# the annotator tests +op.getitem.canraise = [IndexError, KeyError, Exception] +op.setitem.canraise = [IndexError, KeyError, Exception] +op.delitem.canraise = [IndexError, KeyError, Exception] +op.contains.canraise = [Exception] # from an r_dict def _add_exceptions(names, exc): for name in names.split(): - lis = implicit_exceptions.setdefault(name, []) + oper = getattr(op, name) + lis = oper.canraise if exc in lis: raise ValueError, "your list is causing duplication!" lis.append(exc) @@ -356,12 +273,13 @@ def _add_except_ovf(names): # duplicate exceptions and add OverflowError for name in names.split(): - lis = implicit_exceptions.setdefault(name, [])[:] - lis.append(OverflowError) - implicit_exceptions[name+"_ovf"] = lis + oper = getattr(op, name) + oper_ovf = getattr(op, name+'_ovf') + oper_ovf.canraise = list(oper.canraise) + oper_ovf.canraise.append(OverflowError) _add_exceptions("""div mod divmod truediv floordiv pow - inplace_div inplace_mod inplace_divmod inplace_truediv + inplace_div inplace_mod inplace_truediv inplace_floordiv inplace_pow""", ZeroDivisionError) _add_exceptions("""pow inplace_pow lshift inplace_lshift rshift inplace_rshift""", ValueError) @@ -370,7 +288,7 @@ inplace_floordiv inplace_div inplace_mod inplace_pow inplace_lshift""", OverflowError) # without a _ovf version _add_except_ovf("""neg abs add sub mul - floordiv div mod pow lshift""") # with a _ovf version + floordiv div mod lshift""") # with a _ovf version _add_exceptions("""pow""", OverflowError) # for the float case del _add_exceptions, _add_except_ovf diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -1,27 +1,13 @@ from rpython.flowspace.model import Constant -from rpython.flowspace.operation import OperationName, Arity +from rpython.flowspace.operation import func2op, op from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -def sc_import(space, fn, args_w): +def sc_import(space, args_w): assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments' args = [space.unwrap(arg) for arg in args_w] return space.import_name(*args) -def sc_operator(space, fn, args_w): - opname = OperationName[fn] - if len(args_w) != Arity[opname]: - if opname == 'pow' and len(args_w) == 2: - args_w = args_w + [Constant(None)] - elif opname == 'getattr' and len(args_w) == 3: - return space.frame.do_operation('simple_call', Constant(getattr), *args_w) - else: - raise Exception("should call %r with exactly %d arguments" % ( - fn, Arity[opname])) - # completely replace the call with the underlying - # operation and its limited implicit exceptions semantic - return getattr(space, opname)(*args_w) - # _________________________________________________________________________ # a simplified version of the basic printing 
routines, for RPython programs class StdOutBuffer: @@ -47,7 +33,7 @@ # _________________________________________________________________________ -def sc_r_uint(space, r_uint, args_w): +def sc_r_uint(space, args_w): # special case to constant-fold r_uint(32-bit-constant) # (normally, the 32-bit constant is a long, and is not allowed to # show up in the flow graphs at all) @@ -56,10 +42,10 @@ return Constant(r_uint(w_value.value)) return space.frame.do_operation('simple_call', space.wrap(r_uint), w_value) -def sc_we_are_translated(space, we_are_translated, args_w): +def sc_we_are_translated(space, args_w): return Constant(True) -def sc_locals(space, locals, args): +def sc_locals(space, args): raise Exception( "A function calling locals() is not RPython. " "Note that if you're translating code outside the PyPy " @@ -71,5 +57,5 @@ SPECIAL_CASES = {__import__: sc_import, r_uint: sc_r_uint, we_are_translated: sc_we_are_translated, locals: sc_locals} -for fn in OperationName: - SPECIAL_CASES[fn] = sc_operator +for fn, oper in func2op.items(): + SPECIAL_CASES[fn] = oper.make_sc() diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1,6 +1,6 @@ from __future__ import with_statement import new -import py, sys +import py from contextlib import contextmanager from rpython.flowspace.model import Constant, mkentrymap, c_last_exception @@ -1169,6 +1169,28 @@ 'iter': 1, 'newlist': 1, 'next': 1, 'simple_call': 1} + def test_mutate_const_list(self): + lst = list('abcdef') + def f(): + lst[0] = 'x' + return lst + graph = self.codetest(f) + assert 'setitem' in self.all_operations(graph) + + def test_sys_getattr(self): + def f(): + import sys + return sys.modules + graph = self.codetest(f) + assert 'getattr' in self.all_operations(graph) + + def test_sys_import_from(self): + def f(): + from sys import modules + return modules + graph = self.codetest(f) + assert 'getattr' in self.all_operations(graph) + DATA = {'x': 5, 'y': 6} diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -1,4 +1,4 @@ -from rpython.flowspace.operation import FunctionByName +from rpython.flowspace.operation import op from rpython.rlib import debug from rpython.rlib.rarithmetic import is_valid_int from rpython.rtyper.lltypesystem import lltype, llmemory @@ -13,7 +13,6 @@ 'lt': True, 'le': True, 'eq': True, 'ne': True, 'is_true': True} -ops_unary = {'is_true': True, 'neg': True, 'abs': True, 'invert': True} # global synonyms for some types from rpython.rlib.rarithmetic import intmask @@ -46,11 +45,13 @@ def get_primitive_op_src(fullopname): assert '_' in fullopname, "%s: not a primitive op" % (fullopname,) typname, opname = fullopname.split('_', 1) - if opname not in FunctionByName and (opname + '_') in FunctionByName: - func = FunctionByName[opname + '_'] # or_, and_ + if hasattr(op, opname): + oper = getattr(op, opname) + elif hasattr(op, opname + '_'): + oper = getattr(op, opname + '_') # or_, and_ else: - assert opname in FunctionByName, "%s: not a primitive op" % (fullopname,) - func = FunctionByName[opname] + raise ValueError("%s: not a primitive op" % (fullopname,)) + func = oper.pyfunc if typname == 'char': # char_lt, char_eq, ... 
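Side note (illustration only, not part of the patch): the per-operation exception lists that used to live in the separate implicit_exceptions dict, and the trailing-underscore fallback used by get_primitive_op_src() above, can be checked directly against the new operator objects:

    from rpython.flowspace.operation import op

    assert IndexError in op.getitem.canraise        # was implicit_exceptions['getitem']
    assert OverflowError in op.add_ovf.canraise     # filled in by _add_except_ovf
    assert OverflowError not in op.add.canraise     # only the _ovf variant carries it
    # same lookup rule as in get_primitive_op_src():
    oper = getattr(op, 'or', None) or getattr(op, 'or_')
    assert oper.arity == 2 and oper.symbol == '|'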
@@ -72,7 +73,7 @@ fullopname,) argtype = argtype_by_name[typname] - if opname in ops_unary: + if oper.arity == 1: def op_function(x): if not isinstance(x, argtype): raise TypeError("%r arg must be %s, got %r instead" % ( diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -120,7 +120,8 @@ covf = Constant(rarithmetic.ovfcheck) def check_syntax(opname): - exlis = operation.implicit_exceptions.get("%s_ovf" % (opname,), []) + oper = getattr(operation.op, opname + "_ovf") + exlis = oper.canraise if OverflowError not in exlis: raise Exception("ovfcheck in %s: Operation %s has no" " overflow variant" % (graph.name, opname)) @@ -495,11 +496,11 @@ # look for removable operations whose result is never used for i in range(len(block.operations)-1, -1, -1): op = block.operations[i] - if op.result not in read_vars: + if op.result not in read_vars: if canremove(op, block): del block.operations[i] - elif op.opname == 'simple_call': - # XXX we want to have a more effective and safe + elif op.opname == 'simple_call': + # XXX we want to have a more effective and safe # way to check if this operation has side effects # ... if op.args and isinstance(op.args[0], Constant): @@ -626,7 +627,7 @@ while candidates: cand, tgts = candidates.pop() - newexits = list(cand.exits) + newexits = list(cand.exits) for case, tgt in tgts: exit = cand.exits[case] rrenaming = dict(zip(tgt.inputargs,exit.args)) diff --git a/rpython/translator/test/test_translator.py b/rpython/translator/test/test_translator.py --- a/rpython/translator/test/test_translator.py +++ b/rpython/translator/test/test_translator.py @@ -8,7 +8,7 @@ d['key'] = 'value' def test_example(): - t = TranslationContext(simplifying=True) + t = TranslationContext() t.buildflowgraph(example) # this specific example triggered a bug in simplify.py #t.view() diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -21,7 +21,6 @@ class TranslationContext(object): FLOWING_FLAGS = { 'verbose': False, - 'simplifying': True, 'list_comprehension_operations': False, # True, - not super-tested } @@ -30,8 +29,7 @@ from rpython.config.translationoption import get_combined_translation_config config = get_combined_translation_config(translating=True) # ZZZ should go away in the end - for attr in ['verbose', 'simplifying', - 'list_comprehension_operations']: + for attr in ['verbose', 'list_comprehension_operations']: if attr in flowing_flags: setattr(config.translation, attr, flowing_flags[attr]) self.config = config @@ -54,8 +52,7 @@ if self.config.translation.verbose: log.start(nice_repr_for_func(func)) graph = build_flow(func) - if self.config.translation.simplifying: - simplify.simplify_graph(graph) + simplify.simplify_graph(graph) if self.config.translation.list_comprehension_operations: simplify.detect_list_comprehension(graph) if self.config.translation.verbose: From noreply at buildbot.pypy.org Thu Jul 11 15:55:21 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 15:55:21 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: enable writing jitcodes Message-ID: <20130711135521.C77D21C0EF6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65343:82688b1c1e1a Date: 2013-07-11 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/82688b1c1e1a/ Log: enable writing jitcodes diff --git 
a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = False + debug = True def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu From noreply at buildbot.pypy.org Thu Jul 11 16:31:44 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 16:31:44 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: eh, we have to call this hint at the right level, otherwise the JIT Message-ID: <20130711143144.3E0541C0EF6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65344:42a502841f60 Date: 2013-07-11 16:30 +0200 http://bitbucket.org/pypy/pypy/changeset/42a502841f60/ Log: eh, we have to call this hint at the right level, otherwise the JIT does not see it at all diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -95,7 +95,6 @@ self.frame = None raise OperationError(space.w_StopIteration, space.w_None) else: - jit.hint(frame, force_virtualizable=True) return w_result # YIELDed finally: frame.f_backref = jit.vref_None diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -12,7 +12,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame -from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.pyopcode import ExitFrame, Yield from opcode import opmap PyFrame._virtualizable_ = ['last_instr', 'pycode', @@ -73,6 +73,11 @@ self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) is_being_profiled = self.is_being_profiled + except Yield: + self.last_exception = None + w_result = self.popvalue() + jit.hint(self, force_virtualizable=True) + return w_result except ExitFrame: self.last_exception = None return self.popvalue() From noreply at buildbot.pypy.org Thu Jul 11 16:50:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 16:50:30 +0200 (CEST) Subject: [pypy-commit] pypy more-reusal-of-structures: park those changes on a branch for now Message-ID: <20130711145030.7A3131C1361@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: more-reusal-of-structures Changeset: r65345:cb84b0c40d85 Date: 2013-07-11 16:48 +0200 http://bitbucket.org/pypy/pypy/changeset/cb84b0c40d85/ Log: park those changes on a branch for now diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = False + debug = True def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -1,7 +1,7 @@ from rpython.tool.pairtype import pairtype from rpython.flowspace.model import Constant from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, llmemory from 
rpython.rlib import objectmodel, jit from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask, LONG_BIT @@ -50,7 +50,8 @@ assert callable(key_repr) self._key_repr_computer = key_repr else: - self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr) + self.external_key_repr, self.key_repr = self.pickrepr(key_repr, + is_key=True) if not isinstance(value_repr, rmodel.Repr): # not computed yet, done by setup() assert callable(value_repr) self._value_repr_computer = value_repr @@ -63,13 +64,11 @@ self.force_non_null = force_non_null # setup() needs to be called to finish this initialization - def _externalvsinternal(self, rtyper, item_repr): - return rmodel.externalvsinternal(self.rtyper, item_repr) - def _setup_repr(self): if 'key_repr' not in self.__dict__: key_repr = self._key_repr_computer() - self.external_key_repr, self.key_repr = self.pickkeyrepr(key_repr) + self.external_key_repr, self.key_repr = self.pickrepr(key_repr, + is_key=True) if 'value_repr' not in self.__dict__: self.external_value_repr, self.value_repr = self.pickrepr(self._value_repr_computer()) if isinstance(self.DICT, lltype.GcForwardReference): @@ -832,6 +831,8 @@ def recast(P, v): if isinstance(P, lltype.Ptr): + if P == llmemory.GCREF: + return lltype.cast_opaque_ptr(P, v) return lltype.cast_pointer(P, v) else: return v diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -32,13 +32,12 @@ class AbstractDictRepr(rmodel.Repr): - def pickrepr(self, item_repr): + def pickrepr(self, item_repr, is_key=False): if self.custom_eq_hash: return item_repr, item_repr else: - return self._externalvsinternal(self.rtyper, item_repr) - - pickkeyrepr = pickrepr + return rmodel.externalvsinternal(self.rtyper, item_repr, + is_key=is_key) def compact_repr(self): return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr()) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -1,7 +1,7 @@ from rpython.annotator import model as annmodel, unaryop, binaryop, description from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.lltypesystem.lltype import (Void, Bool, Float, typeOf, LowLevelType, isCompatibleType) from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -433,13 +433,17 @@ Constant('gc')).value return alloc_flavor -def externalvsinternal(rtyper, item_repr): # -> external_item_repr, (internal_)item_repr +def externalvsinternal(rtyper, item_repr, is_key=False): # -> external_item_repr, (internal_)item_repr from rpython.rtyper import rclass - if (isinstance(item_repr, rclass.AbstractInstanceRepr) and - getattr(item_repr, 'gcflavor', 'gc') == 'gc'): - return item_repr, rclass.getinstancerepr(rtyper, None) + TP = item_repr.lowleveltype + if not is_key: + if isinstance(TP, lltype.Ptr) and TP.TO._gckind == 'gc': + return item_repr, rtyper.gcref_repr else: - return item_repr, item_repr + if (isinstance(item_repr, rclass.AbstractInstanceRepr) and + getattr(item_repr, 'gcflavor', 'gc') == 'gc'): + return item_repr, rclass.getinstancerepr(rtyper, None) + return item_repr, item_repr class DummyValueBuilder(object): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py 
@@ -23,6 +23,7 @@ from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) +from rpython.rtyper.lltypesystem import llmemory from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError from rpython.rtyper.typesystem import LowLevelTypeSystem, ObjectOrientedTypeSystem @@ -68,6 +69,8 @@ self.typererror_count = 0 # make the primitive_to_repr constant mapping self.primitive_to_repr = {} + self.gcref_repr = self.getrepr(annmodel.lltype_to_annotation( + llmemory.GCREF)) if self.type_system.offers_exceptiondata: self.exceptiondata = self.type_system.exceptiondata.ExceptionData(self) else: @@ -918,7 +921,17 @@ def convertvar(self, v, r_from, r_to): assert isinstance(v, (Variable, Constant)) if r_from != r_to: - v = pair(r_from, r_to).convert_from_to(v, self) + # we add a special conversion of opaque pointers + if (r_from.lowleveltype == llmemory.GCREF and + isinstance(r_to.lowleveltype, Ptr)): + v = self.genop('cast_opaque_ptr', [v], + resulttype=r_to.lowleveltype) + elif (r_to.lowleveltype == llmemory.GCREF and + isinstance(r_from.lowleveltype, Ptr)): + v = self.genop('cast_opaque_ptr', [v], + resulttype=r_to.lowleveltype) + else: + v = pair(r_from, r_to).convert_from_to(v, self) if v is NotImplemented: raise TyperError("don't know how to convert from %r to %r" % (r_from, r_to)) diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1056,18 +1056,6 @@ -class TestOOtype(BaseTestRdict, OORtypeMixin): - - def test_recursive(self): - def func(i): - dic = {i: {}} - dic[i] = dic - return dic[i] - res = self.interpret(func, [5]) - assert res.ll_get(5) is res - - # ____________________________________________________________ - class TestStress: def test_stress(self): From noreply at buildbot.pypy.org Thu Jul 11 17:36:42 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 17:36:42 +0200 (CEST) Subject: [pypy-commit] stmgc default: improve demo_random Message-ID: <20130711153642.E84291C1007@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r393:d791f50ecec8 Date: 2013-07-11 17:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/d791f50ecec8/ Log: improve demo_random diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -137,7 +137,7 @@ return x; } -void push_roots() +void push_roots(int with_cache) { int i; for (i = 0; i < td.num_roots; i++) { @@ -145,19 +145,35 @@ if (td.roots[i]) stm_push_root(td.roots[i]); } + + if (with_cache) { + stm_push_root(NULL); + for (i = 0; i < CACHE_ENTRIES; i++) { + if (td.writeable[i]) + stm_push_root((gcptr)td.writeable[i]); + } + } } -__thread revision_t temp_cache[CACHE_ENTRIES]; -void pop_roots() +void pop_roots(int with_cache) { int i; + /* some objects may have changed positions */ + memset(td.writeable, 0, sizeof(td.writeable)); + + if (with_cache) { + gcptr obj = stm_pop_root(); + while (obj) { + CACHE_AT(td.writeable, obj) = obj; + obj = stm_pop_root(); + } + } + for (i = td.num_roots - 1; i >= 0; i--) { if (td.roots[i]) td.roots[i] = stm_pop_root(); check(td.roots[i]); } - /* some objects may have changed positions */ - memset(td.writeable, 0, sizeof(td.writeable)); } void del_root(int idx) @@ -170,9 +186,9 @@ nodeptr allocate_node() { nodeptr r; - push_roots(); + push_roots(1); r = 
(nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE); - pop_roots(); + pop_roots(1); return r; } @@ -354,22 +370,22 @@ { int k = get_rand(100); if (k < 10) { - push_roots(); + push_roots(1); stm_push_root(p); stm_become_inevitable("fun"); p = stm_pop_root(); - pop_roots(); + pop_roots(1); } else if (k < 40) { - push_roots(); + push_roots(1); stmgc_minor_collect(); - pop_roots(); + pop_roots(1); p = NULL; } else if (k < 41 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); - push_roots(); + push_roots(1); stmgcpage_possibly_major_collect(1); - pop_roots(); + pop_roots(1); p = NULL; } return p; @@ -534,8 +550,9 @@ p = id_hash_events(p, _r, _sr); else if (k < 8) p = rare_events(p, _r, _sr); - else if (get_rand(3) == 1) { + else if (get_rand(20) == 1) { // transaction break + fprintf(stdout, "|"); if (td.interruptible) return (gcptr)-1; // break current transaction_break(); @@ -547,7 +564,7 @@ void transaction_break() { - push_roots(); + push_roots(0); td.interruptible = 1; copy_roots(td.roots, td.roots_outside_perform, td.num_roots); @@ -559,9 +576,9 @@ copy_roots(td.roots_outside_perform, td.roots, td.num_roots); td.interruptible = 0; - pop_roots(); + pop_roots(0); - memset(&td.writeable, 0, sizeof(td.writeable)); + /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */ } @@ -576,8 +593,8 @@ assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); arg1 = stm_pop_root(); assert(arg1 == NULL); - pop_roots(); - push_roots(); + pop_roots(0); + push_roots(0); stm_push_root(arg1); stm_push_root(end_marker); From noreply at buildbot.pypy.org Thu Jul 11 17:36:44 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 17:36:44 +0200 (CEST) Subject: [pypy-commit] stmgc default: merge Message-ID: <20130711153644.15AC31C35B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r394:19e89845f0a0 Date: 2013-07-11 17:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/19e89845f0a0/ Log: merge diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -199,7 +199,7 @@ DuObject *du_div(DuObject *cons, DuObject *locals) { int result = 0; - int first = 1; + int first = 1; while (cons != Du_None) { _du_read1(cons); @@ -208,12 +208,12 @@ _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); - if (first) { - result = DuInt_AsInt(obj); - first = 0; - } else { - result /= DuInt_AsInt(obj); - } + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result /= DuInt_AsInt(obj); + } _du_restore2(next, locals); cons = next; @@ -612,8 +612,8 @@ void Du_Initialize(int num_threads) { stm_initialize(); - all_threads_count = num_threads; - all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); + all_threads_count = num_threads; + all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); @@ -621,7 +621,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "+", du_add); DuFrame_SetBuiltinMacro(Du_Globals, "-", du_sub); DuFrame_SetBuiltinMacro(Du_Globals, "*", du_mul); - DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); + DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); DuFrame_SetBuiltinMacro(Du_Globals, "<", du_lt); DuFrame_SetBuiltinMacro(Du_Globals, "<=", du_le); DuFrame_SetBuiltinMacro(Du_Globals, "==", du_eq); @@ -642,12 +642,12 @@ DuFrame_SetBuiltinMacro(Du_Globals, "defun", du_defun); DuFrame_SetBuiltinMacro(Du_Globals, "car", du_car); DuFrame_SetBuiltinMacro(Du_Globals, 
"cdr", du_cdr); - DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); + DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); DuFrame_SetBuiltinMacro(Du_Globals, "not", du_not); DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); - DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); + DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", Du_None); } diff --git a/duhton/test/test_cons.py b/duhton/test/test_cons.py --- a/duhton/test/test_cons.py +++ b/duhton/test/test_cons.py @@ -10,6 +10,8 @@ def test_pair(): assert run("(print (pair? 1))") == "0\n" assert run("(print (pair? (cons 1 2)))") == "1\n" + assert run("(setq x (cons 1 2)) (print (pair? x))") == "1\n" + assert run("(setq x 42) (print (pair? x))") == "0\n" def test_car_cdr(): assert run("(print (car (quote (2 3))))") == "2\n" diff --git a/duhton/test/test_int.py b/duhton/test/test_int.py --- a/duhton/test/test_int.py +++ b/duhton/test/test_int.py @@ -20,6 +20,10 @@ assert evaluate("(* 2 3 7)") == 42 assert evaluate("(* (+ 5 1) (+ 6 1))") == 42 +def test_div(): + assert evaluate("(/ 11 2)") == 5 + assert evaluate("(/ 29 2 3)") == 4 + def test_cmp(): assert evaluate("(< 6 6)") == 0 assert evaluate("(<= 6 6)") == 1 From noreply at buildbot.pypy.org Thu Jul 11 18:13:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 11 Jul 2013 18:13:38 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: hopefully in the right direction: now a single cond_call_stm_b because there may be many more variants in the future. Message-ID: <20130711161338.6353A1C0EF6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65346:e1aa4591dc07 Date: 2013-07-11 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/e1aa4591dc07/ Log: hopefully in the right direction: now a single cond_call_stm_b because there may be many more variants in the future. diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -958,11 +958,8 @@ def execute_cond_call_gc_wb_array(self, descr, a, b, c): py.test.skip("cond_call_gc_wb_array not supported") - def execute_cond_call_stm_wb(self, descr, a): - py.test.skip("cond_call_stm_wb not supported") - - def execute_cond_call_stm_rb(self, descr, a): - py.test.skip("cond_call_stm_rb not supported") + def execute_cond_call_stm_b(self, descr, a): + py.test.skip("cond_call_stm_b not supported") def execute_keepalive(self, descr, x): pass diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -270,23 +270,43 @@ rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) return rffi.cast(lltype.Signed, rst_addr) +class GcRootMap_stm(object): + is_shadow_stack = False # XXX: should it have an is_stmgc? 
-class WriteBarrierDescr(AbstractDescr): + def __init__(self, gcdescr): + pass + + def register_asm_addr(self, start, mark): + pass + + def get_root_stack_top_addr(self): + rst_addr = llop.gc_adr_of_root_stack_top(llmemory.Address) + return rffi.cast(lltype.Signed, rst_addr) + + +class BarrierDescr(AbstractDescr): def __init__(self, gc_ll_descr): self.llop1 = gc_ll_descr.llop1 self.returns_modified_object = False - self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType( + self.FUNCPTR = lltype.Ptr(lltype.FuncType( [llmemory.Address], lltype.Void)) - self.fielddescr_tid = gc_ll_descr.fielddescr_tid self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR - # + + def repr_of_descr(self): + raise NotImplementedError + + def __repr(self): + raise NotImplementedError + +class WriteBarrierDescr(BarrierDescr): + def __init__(self, gc_ll_descr): + BarrierDescr.__init__(self, gc_ll_descr) + self.fielddescr_tid = gc_ll_descr.fielddescr_tid + GCClass = gc_ll_descr.GCClass - if GCClass is None: # for tests - return - self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = ( self.extract_flag_byte(self.jit_wb_if_flag)) @@ -325,14 +345,9 @@ return (i, struct.unpack('b', value[i])[0]) def get_barrier_funcptr(self, returns_modified_object): - assert returns_modified_object == self.returns_modified_object - llop1 = self.llop1 - if returns_modified_object: - funcptr = self.wb_failing_case_ptr - else: - FUNCTYPE = self.WB_FUNCPTR - funcptr = llop1.get_write_barrier_failing_case(FUNCTYPE) - return funcptr + assert not returns_modified_object + FUNCTYPE = self.FUNCPTR + return llop1.get_write_barrier_failing_case(FUNCTYPE) def get_write_barrier_fn(self, cpu, returns_modified_object): # must pass in 'self.returns_modified_object', to make sure that @@ -343,16 +358,13 @@ def get_write_barrier_from_array_fn(self, cpu): # returns a function with arguments [array, index, newvalue] - assert not self.returns_modified_object llop1 = self.llop1 funcptr = llop1.get_write_barrier_from_array_failing_case( - self.WB_FUNCPTR) + self.FUNCPTR) funcaddr = llmemory.cast_ptr_to_adr(funcptr) return cpu.cast_adr_to_int(funcaddr) # this may return 0 def has_write_barrier_from_array(self, cpu): - if self.returns_modified_object: - return False return self.get_write_barrier_from_array_fn(cpu) != 0 def get_wb_slowpath(self, withcards, withfloats): @@ -372,15 +384,11 @@ # the GC, and call it immediately funcptr = self.get_barrier_funcptr(returns_modified_object) res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) - if returns_modified_object: - return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) - else: - if returns_modified_object: - return gcref_struct -class STMBarrierDescr(WriteBarrierDescr): + +class STMBarrierDescr(BarrierDescr): def __init__(self, gc_ll_descr, stmcat, cfunc_name): - WriteBarrierDescr.__init__(self, gc_ll_descr) + BarrierDescr.__init__(self, gc_ll_descr) self.stmcat = stmcat self.returns_modified_object = True self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( @@ -394,8 +402,14 @@ _nowrapper=True) def repr_of_descr(self): - cat = self.stmcat - return cat + return self.stmcat + + def __repr__(self): + return '' % (self.repr_of_descr(),) + + def get_barrier_funcptr(self, returns_modified_object): + assert returns_modified_object + return self.wb_failing_case_ptr @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): @@ -403,8 +417,7 @@ # XXX: fastpath for Read and Write variants funcptr = 
self.get_barrier_funcptr(returns_modified_object) res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) - if returns_modified_object: - return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) + return llmemory.cast_adr_to_ptr(res, llmemory.GCREF) class STMReadBarrierDescr(STMBarrierDescr): @@ -412,9 +425,8 @@ assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, 'stm_read_barrier') - - + class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W', 'R2W'] @@ -448,7 +460,9 @@ self._make_layoutbuilder() self._make_gcrootmap() self._setup_gcclass() - self._setup_tid() + if not self.stm: + # XXX: not needed with stm/shadowstack?? + self._setup_tid() self._setup_write_barrier() self._setup_str() self._make_functions(really_not_translated) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -9,8 +9,8 @@ # # Any SETFIELD_GC, SETARRAYITEM_GC, SETINTERIORFIELD_GC must be done on a # W object. The operation that forces an object p1 to be W is -# COND_CALL_STM_WB(p1, descr=x2Wdescr), for x in 'PGORL'. This -# COND_CALL_STM_WB is a bit special because if p1 is not W, it *replaces* +# COND_CALL_STM_B(p1, descr=x2Wdescr), for x in 'PGORL'. This +# COND_CALL_STM_B is a bit special because if p1 is not W, it *replaces* # its value with the W copy (by changing the register's value and # patching the stack location if any). It's still conceptually the same # object, but the pointer is different. @@ -121,10 +121,7 @@ except KeyError: return v_base # no barrier needed args = [v_base,] - if target_category == 'W': - op = rop.COND_CALL_STM_WB - else: - op = rop.COND_CALL_STM_RB + op = rop.COND_CALL_STM_B self.newops.append(ResOperation(op, args, None, descr=write_barrier_descr)) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -44,7 +44,7 @@ jump() """, """ [p1, p2] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) jump() """) @@ -59,7 +59,7 @@ """, """ [p1, p2] p3 = same_as(ConstPtr(t)) - cond_call_stm_wb(p3, descr=P2Wdescr) + cond_call_stm_b(p3, descr=P2Wdescr) setfield_gc(p3, p2, descr=tzdescr) jump() """, t=NULL) @@ -87,9 +87,9 @@ jump() """, """ [p1, p2, p3, p4] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) - cond_call_stm_wb(p3, descr=P2Wdescr) + cond_call_stm_b(p3, descr=P2Wdescr) setfield_gc(p3, p4, descr=tzdescr) jump() """) @@ -102,7 +102,7 @@ jump() """, """ [p1, p2, i3] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) jump() @@ -117,10 +117,10 @@ jump(p1) """, """ [p1, p2, i3] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, p2, descr=tzdescr) label(p1, i3) - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, i3, descr=tydescr) jump(p1) """) @@ -162,7 +162,7 @@ jump(p2) """, """ [p1] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) """) @@ -177,7 +177,7 @@ """, """ [p1] p3 = same_as(ConstPtr(t)) - 
cond_call_stm_rb(p3, descr=P2Rdescr) + cond_call_stm_b(p3, descr=P2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) jump(p2) """, t=NULL) @@ -190,7 +190,7 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) jump(i3) """) @@ -202,7 +202,7 @@ jump(i3) """, """ [p1, i2] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=adescr) jump(i3) """) @@ -215,7 +215,7 @@ jump(p2, i2) """, """ [p1] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) i2 = getfield_gc(p1, descr=tydescr) jump(p2, i2) @@ -229,9 +229,9 @@ jump(p2, i2) """, """ [p1] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_rb(p2, descr=P2Rdescr) + cond_call_stm_b(p2, descr=P2Rdescr) i2 = getfield_gc(p2, descr=tydescr) jump(p2, i2) """) @@ -247,10 +247,10 @@ jump(p1) """, """ [p1] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) i1 = getfield_gc(p1, descr=tydescr) i2 = int_add(i1, 1) - cond_call_stm_wb(p1, descr=R2Wdescr) + cond_call_stm_b(p1, descr=R2Wdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) @@ -263,7 +263,7 @@ jump(p2) """, """ [p1] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, 123, descr=tydescr) p2 = getfield_gc(p1, descr=tzdescr) jump(p2) @@ -295,10 +295,10 @@ jump(p2) """, """ [p1] - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p1, descr=P2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) call(p2) - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, 5, descr=tydescr) jump(p2) """) @@ -358,9 +358,9 @@ jump() """, """ [p1, i1, p2, p3, i3, p4] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setarrayitem_gc(p1, i1, p2, descr=adescr) - cond_call_stm_wb(p3, descr=P2Wdescr) + cond_call_stm_b(p3, descr=P2Wdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) jump() """) @@ -374,7 +374,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) @@ -390,7 +390,7 @@ jump() """, """ [p1, p2, i2, p3, i3] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setinteriorfield_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=adescr) @@ -405,7 +405,7 @@ jump() """, """ [p1, i2, i3] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() @@ -432,11 +432,11 @@ jump(i2, p7) """ % op, """ [i1, i2, i3, p7] - cond_call_stm_wb(p7, descr=P2Wdescr) + cond_call_stm_b(p7, descr=P2Wdescr) setfield_gc(p7, 10, descr=tydescr) $INEV %s - cond_call_stm_wb(p7, descr=P2Wdescr) + cond_call_stm_b(p7, descr=P2Wdescr) setfield_gc(p7, 20, descr=tydescr) jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -448,8 +448,8 @@ jump() """, """ [p1, p2, i1, i2, i3] - cond_call_stm_wb(p2, descr=P2Wdescr) - cond_call_stm_rb(p1, descr=P2Rdescr) + cond_call_stm_b(p2, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Rdescr) copystrcontent(p1, p2, i1, i2, i3) jump() """) @@ -468,7 +468,7 @@ jump(p1) """ % op, """ [p1] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s 
setfield_gc(p1, 20, descr=tydescr) @@ -491,10 +491,10 @@ jump(p1) """ % op, """ [p1] - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, 10, descr=tydescr) %s - cond_call_stm_wb(p1, descr=P2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, 20, descr=tydescr) jump(p1) """ % op, calldescr2=calldescr2) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -346,8 +346,7 @@ rop.CALL_ASSEMBLER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, - rop.COND_CALL_STM_WB, - rop.COND_CALL_STM_RB, + rop.COND_CALL_STM_B, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -500,8 +500,7 @@ 'STRSETITEM/3', 'UNICODESETITEM/3', #'RUNTIMENEW/1', # ootype operation - 'COND_CALL_STM_WB/1d', # objptr (write barrier) - 'COND_CALL_STM_RB/1d', # objptr (read barrier) + 'COND_CALL_STM_B/1d', # objptr (write/read barrier) 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) 'DEBUG_MERGE_POINT/*', # debugging only diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -45,7 +45,10 @@ VISIT_FPTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void)) - TRANSLATION_PARAMS = {} + minimal_size_in_nursery = llmemory.sizeof(HDR) + + TRANSLATION_PARAMS = { + } def setup(self): # Hack: MovingGCBase.setup() sets up stuff related to id(), which @@ -90,6 +93,15 @@ (obj + offset_to_length).signed[0] = length return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + @classmethod + def JIT_max_size_of_young_obj(cls): + return -1 # XXX: should not be used + + @classmethod + def JIT_minimal_size_in_nursery(cls): + return cls.minimal_size_in_nursery + def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" if gen > 0: diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -46,6 +46,9 @@ def gc_header_for(self, obj, needs_hash=False): return self.gcdata.gc.gcheaderbuilder.header_of_object(obj) + def gct_gc_adr_of_root_stack_top(self, hop): + hop.genop("stm_get_root_stack_top") + def _gct_with_roots_pushed(self, hop): livevars = self.push_roots(hop) self.default(hop) diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -94,6 +94,11 @@ return '%s = (%s)stm_pop_root();' % ( arg0, cdecl(funcgen.lltypename(op.args[0]), '')) +def stm_get_root_stack_top(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (%s)stm_shadowstack;' % ( + result, cdecl(funcgen.lltypename(op.result), '')) + def stm_allocate(funcgen, op): arg0 = funcgen.expr(op.args[0]) arg1 = funcgen.expr(op.args[1]) From noreply at buildbot.pypy.org Thu Jul 11 19:00:48 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 11 Jul 2013 19:00:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Update whatsnew for branch flowoperators Message-ID: <20130711170048.263631C0EF6@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r65347:6011e077f608 
Date: 2013-07-11 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/6011e077f608/ Log: Update whatsnew for branch flowoperators diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,7 @@ .. branch: improve-str2charp Improve the performance of I/O writing up to 15% by using memcpy instead of copying char-by-char in str2charp and get_nonmovingbuffer + +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. From noreply at buildbot.pypy.org Thu Jul 11 19:47:59 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 19:47:59 +0200 (CEST) Subject: [pypy-commit] pypy default: fix rpython-level exception Message-ID: <20130711174759.486201C0EF6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65348:f2f657d2fa00 Date: 2013-07-11 19:34 +0200 http://bitbucket.org/pypy/pypy/changeset/f2f657d2fa00/ Log: fix rpython-level exception diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -74,7 +74,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.str0_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -679,6 +679,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + class TestAbi: def test_abi_tag(self): From noreply at buildbot.pypy.org Thu Jul 11 19:48:00 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 19:48:00 +0200 (CEST) Subject: [pypy-commit] pypy default: a few more missing expected_lengths Message-ID: <20130711174800.C211C1C1007@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65349:0dbb3ef0039b Date: 2013-07-11 19:47 +0200 http://bitbucket.org/pypy/pypy/changeset/0dbb3ef0039b/ Log: a few more missing expected_lengths diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: 
diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -473,7 +473,7 @@ option_ptr = rffi.cast(rffi.INTP, value_ptr) option_ptr[0] = space.int_w(w_option) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) + w_onoff, w_time, w_interval = space.unpackiterable(w_option, 3) option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) option_ptr.c_onoff = space.uint_w(w_onoff) option_ptr.c_keepalivetime = space.uint_w(w_time) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -318,7 +318,7 @@ if not base.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape,]) else: - w_fldname, w_flddesc = space.fixedview(w_elem) + w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) if fldname in fields: From noreply at buildbot.pypy.org Thu Jul 11 21:57:18 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 11 Jul 2013 21:57:18 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fix ztranslation, break zjit and compile Message-ID: <20130711195718.00D3B1C1361@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65351:72758565e385 Date: 2013-07-11 18:47 +0300 http://bitbucket.org/pypy/pypy/changeset/72758565e385/ Log: fix ztranslation, break zjit and compile diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -16,7 +16,7 @@ else: w_ret = space.allocate_instance(W_NDimArray, w_cls) W_NDimArray.__init__(w_ret, impl) - space.call_method(w_ret, space.wrap('__array_finalize__'), w_instance) + space.call_method(w_ret, '__array_finalize__', w_instance) return w_ret class ArrayArgumentException(Exception): @@ -45,8 +45,7 @@ w_ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) W_NDimArray.__init__(w_ret, impl) assert isinstance(w_ret, W_NDimArray) - space.call_function(space.getattr(w_ret, - space.wrap('__array_finalize__')), w_subtype) + space.call_method(w_ret, '__array_finalize__', w_subtype) return w_ret return W_NDimArray(impl) @@ -65,8 +64,7 @@ if w_subtype: w_ret = space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) - space.call_function(space.getattr(w_ret, - space.wrap('__array_finalize__')), w_subtype) + space.call_method(w_ret, '__array_finalize__', w_subtype) return w_ret return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -630,7 +630,6 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - print w_dtype, w_type if not w_type and w_dtype: try: if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): @@ -1150,7 +1149,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ 
-1163,7 +1162,7 @@ w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) w_arr.fill(one) - return w_arr + return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) From noreply at buildbot.pypy.org Thu Jul 11 21:57:16 2013 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 11 Jul 2013 21:57:16 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: hack at compile to support ndarray subclasses Message-ID: <20130711195716.D6F141C1007@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65350:3464b2eff3be Date: 2013-07-09 19:46 +0300 http://bitbucket.org/pypy/pypy/changeset/3464b2eff3be/ Log: hack at compile to support ndarray subclasses diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,17 +186,28 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + if not w_type2: + return self.wrap(False) + return self.wrap(issubclass(w_type1, w_type2)) + def type(self, w_obj): - return w_obj.tp + try: + return w_obj.tp + except AttributeError: + if isinstance(w_obj, W_NDimArray): + return W_NDimArray + if issubclass(w_obj, W_NDimArray): + return W_NDimArray + return None def gettypefor(self, w_obj): - return None + return self.type(w_obj) def call_function(self, tp, w_dtype): return w_dtype @@ -205,7 +218,9 @@ return what def allocate_instance(self, klass, w_subtype): - return instantiate(klass) + inst = instantiate(klass) + inst.tp = klass + return inst def newtuple(self, list_w): return ListObject(list_w) @@ -329,6 +344,8 @@ self.name = name.strip(" ") def execute(self, interp): + if self.name == 'None': + return None return interp.variables[self.name] def __repr__(self): @@ -451,6 +468,32 @@ def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) +class ArrayClass(Node): + def __init__(self): + self.v = W_NDimArray + + def execute(self, interp): + return self.v + + def __repr__(self): + return '' + +class DtypeClass(Node): + def __init__(self, dt): + self.v = dt + + def execute(self, interp): + if self.v == 'int': + dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'float': + dtype = get_dtype_cache(interp.space).w_float64dtype + else: + raise BadToken('unknown v to dtype "%s"' % self.v) + return dtype + + def __repr__(self): + return '' % self.v + class Execute(Node): def __init__(self, expr): self.expr = expr @@ -533,6 +576,14 @@ w_res = where(interp.space, arr, arg1, arg2) else: assert False + elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: + if len(self.args) != 2: + raise ArgumentMismatch + arg = self.args[1].execute(interp) + if self.name == 'view': + w_res = arr.descr_view(interp.space, arg) + else: + assert False else: raise WrongFunctionName if isinstance(w_res, W_NDimArray): @@ -652,6 +703,12 @@ if token.name == 'identifier': if tokens.remaining() and tokens.get(0).name == 'paren_left': 
stack.append(self.parse_function_call(token.v, tokens)) + elif token.v.strip(' ') == 'ndarray': + stack.append(ArrayClass()) + elif token.v.strip(' ') == 'int': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'float': + stack.append(DtypeClass('float')) else: stack.append(Variable(token.v)) elif token.name == 'array_left': diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -260,8 +260,9 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return wrap_impl(space, space.type(self), - self, self.implementation.copy(space)) + copy = self.implementation.copy(space) + w_subtype = space.type(self) + return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): return wrap_impl(space, space.type(self), self, @@ -629,12 +630,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : + print w_dtype, w_type if not w_type and w_dtype: try: - if w_dtype.issubtype(space.gettypefor(W_NDimArray)): + if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): w_type = w_dtype w_dtype = None - except: + except (OperationError, TypeError): pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -33,11 +33,11 @@ lhs_for_subtype = w_lhs rhs_for_subtype = w_rhs #it may be something like a FlatIter, which is not an ndarray - if not lhs_type.issubtype(w_ndarray): + if not space.is_true(space.issubtype(lhs_type, w_ndarray)): lhs_type = space.type(w_lhs.base) lhs_for_subtype = w_lhs.base - if not rhs_type.issubtype(w_ndarray): - rhs_type = space.gettypefor(w_rhs.base) + if not space.is_true(space.issubtype(rhs_type, w_ndarray)): + rhs_type = space.type(w_rhs.base) rhs_for_subtype = w_rhs.base if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): w_lhs, w_rhs = w_rhs, w_lhs diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -2,7 +2,7 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, - FunctionCall, FakeSpace) + FunctionCall, FakeSpace, W_NDimArray) class TestCompiler(object): @@ -84,6 +84,7 @@ assert interp.code.statements[0] == Assignment( 'a', Operator(Variable('b'), "+", FloatConstant(3))) + class TestRunner(object): def run(self, code): interp = numpy_compile(code) @@ -290,4 +291,32 @@ ''') assert interp.results[0].real == 0 assert interp.results[0].imag == 1 - + + def test_view_none(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = None + c = view(a, b) + c -> 0 + ''') + assert interp.results[0].value == 1 + + def test_view_ndarray(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = ndarray + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + + def test_view_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) From noreply at buildbot.pypy.org Thu Jul 11 21:58:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 21:58:21 +0200 (CEST) 
Subject: [pypy-commit] pypy default: Issue 1533: fix an RPython-level OverflowError for Message-ID: <20130711195821.419911C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65352:6ac45c25b393 Date: 2013-07-11 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/6ac45c25b393/ Log: Issue 1533: fix an RPython-level OverflowError for space.float_w(w_big_long_number). diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -126,10 +126,7 @@ return W_ComplexObject(w_int.intval, 0.0) def delegate_Long2Complex(space, w_long): - try: - dval = w_long.tofloat() - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + dval = w_long.tofloat(space) return W_ComplexObject(dval, 0.0) def delegate_Float2Complex(space, w_float): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,11 +62,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - try: - return W_FloatObject(w_longobj.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return W_FloatObject(w_longobj.tofloat(space)) # float__Float is supposed to do nothing, unless it has diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -26,8 +26,12 @@ def longval(self): return self.num.tolong() - def tofloat(self): - return self.num.tofloat() + def tofloat(self, space): + try: + return self.num.tofloat() + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("long int too large to convert to float")) def toint(self): return self.num.toint() @@ -66,7 +70,7 @@ return w_self.num def float_w(self, space): - return self.num.tofloat() + return self.tofloat(space) def int(self, space): if (type(self) is not W_LongObject and @@ -124,11 +128,7 @@ return long__Long(space, w_value) def float__Long(space, w_longobj): - try: - return space.newfloat(w_longobj.num.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return space.newfloat(w_longobj.tofloat(space)) def repr__Long(space, w_long): return space.wrap(w_long.num.repr()) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -18,6 +18,12 @@ w_obj = fromlong(42) assert space.unwrap(w_obj) == 42 + def test_overflow_error(self): + space = self.space + fromlong = lobj.W_LongObject.fromlong + w_big = fromlong(10**900) + space.raises_w(space.w_OverflowError, space.float_w, w_big) + def test_rint_variants(self): py.test.skip("XXX broken!") from 
rpython.rtyper.tool.rfficache import platform From noreply at buildbot.pypy.org Thu Jul 11 22:00:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 11 Jul 2013 22:00:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Another test now fixed (by amaury). Message-ID: <20130711200001.522351C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65353:9919fe43fdef Date: 2013-07-11 21:59 +0200 http://bitbucket.org/pypy/pypy/changeset/9919fe43fdef/ Log: Another test now fixed (by amaury). diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) def test_gmtime(self): import time as rctime From noreply at buildbot.pypy.org Thu Jul 11 22:18:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 11 Jul 2013 22:18:16 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: reset the virtualizable token if we just forced it by hand Message-ID: <20130711201816.342311C303E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65354:8088bbf5b524 Date: 2013-07-11 22:17 +0200 http://bitbucket.org/pypy/pypy/changeset/8088bbf5b524/ Log: reset the virtualizable token if we just forced it by hand diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2550,7 +2550,9 @@ self.execute_and_record(rop.SETARRAYITEM_GC, descr, abox, ConstInt(j), itembox) assert i + 1 == len(self.virtualizable_boxes) - + # we're during tracing, so we should not execute it + self.history.record(rop.SETFIELD_GC, [vbox, self.cpu.ts.CONST_NULL], + None, descr=vinfo.vable_token_descr) def replace_box(self, oldbox, newbox): assert isinstance(oldbox, Box) diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1577,7 +1577,7 @@ l = [op for op in bridge.operations if op.getopnum() == rop.SETFIELD_GC] assert "'inst_x'" in str(l[0].getdescr().realdescrref()) - assert len(l) == 1 # no vable token + assert len(l) == 2 # vable token set to null l = [op for op in bridge.operations if op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 From noreply at buildbot.pypy.org Thu Jul 11 22:34:21 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 22:34:21 +0200 (CEST) Subject: [pypy-commit] pypy default: in dict.update check for the common case first (arg is a dict) Message-ID: <20130711203421.06D741C303E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65355:079e1c6fa066 Date: 2013-07-09 01:43 +0200 http://bitbucket.org/pypy/pypy/changeset/079e1c6fa066/ Log: in dict.update check for the common case first (arg is a dict) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1039,15 +1039,18 @@ def update1(space, w_dict, w_data): - if space.findattr(w_data, space.wrap("keys")) is None: + if isinstance(w_data, W_DictMultiObject): # optimization case only + update1_dict_dict(space, w_dict, w_data) + return + w_method = 
space.findattr(w_data, space.wrap("keys")) + if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - update1_pairs(space, w_dict, w_data) + data_w = space.listview(w_data) + update1_pairs(space, w_dict, data_w) else: - if isinstance(w_data, W_DictMultiObject): # optimization case only - update1_dict_dict(space, w_dict, w_data) - else: - # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - update1_keys(space, w_dict, w_data) + # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" + data_w = space.listview(space.call_function(w_method)) + update1_keys(space, w_dict, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1061,8 +1064,8 @@ w_dict.setitem(w_key, w_value) -def update1_pairs(space, w_dict, w_data): - for w_pair in space.listview(w_data): +def update1_pairs(space, w_dict, data_w): + for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: raise OperationError(space.w_ValueError, @@ -1071,9 +1074,8 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, w_data): - w_keys = space.call_method(w_data, "keys") - for w_key in space.listview(w_keys): +def update1_keys(space, w_dict, data_w): + for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) From noreply at buildbot.pypy.org Thu Jul 11 22:34:22 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 22:34:22 +0200 (CEST) Subject: [pypy-commit] pypy default: fix typo Message-ID: <20130711203422.434CB1C303E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65356:902d013fd2ce Date: 2013-07-11 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/902d013fd2ce/ Log: fix typo diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -180,8 +180,7 @@ if not space.isinstance_w(w_other, space.w_set): return space.w_False - # XXX there is no test_buildinshortcut.py - # tested in test_buildinshortcut.py + # tested in test_builtinshortcut.py # XXX do not make new setobject here w_other_as_set = self._newobj(space, w_other) return space.wrap(self.equals(w_other_as_set)) From noreply at buildbot.pypy.org Thu Jul 11 22:34:23 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 22:34:23 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130711203423.61EB31C303E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65357:198a48545f50 Date: 2013-07-11 22:32 +0200 http://bitbucket.org/pypy/pypy/changeset/198a48545f50/ Log: merge diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1039,15 +1039,18 @@ def update1(space, w_dict, w_data): - if space.findattr(w_data, space.wrap("keys")) is None: + if isinstance(w_data, W_DictMultiObject): # optimization case only + update1_dict_dict(space, w_dict, w_data) + return + w_method = space.findattr(w_data, space.wrap("keys")) + if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - update1_pairs(space, w_dict, w_data) + data_w = space.listview(w_data) + update1_pairs(space, w_dict, data_w) else: - if isinstance(w_data, W_DictMultiObject): # optimization case only - update1_dict_dict(space, w_dict, w_data) - else: - # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - update1_keys(space, w_dict, w_data) + # general case -- 
"for k in o.keys(): dict.__setitem__(d, k, o[k])" + data_w = space.listview(space.call_function(w_method)) + update1_keys(space, w_dict, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1061,8 +1064,8 @@ w_dict.setitem(w_key, w_value) -def update1_pairs(space, w_dict, w_data): - for w_pair in space.listview(w_data): +def update1_pairs(space, w_dict, data_w): + for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: raise OperationError(space.w_ValueError, @@ -1071,9 +1074,8 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, w_data): - w_keys = space.call_method(w_data, "keys") - for w_key in space.listview(w_keys): +def update1_keys(space, w_dict, data_w): + for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) From noreply at buildbot.pypy.org Thu Jul 11 22:34:24 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 11 Jul 2013 22:34:24 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130711203424.955F91C303E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65358:1151bfc2e7f9 Date: 2013-07-11 22:33 +0200 http://bitbucket.org/pypy/pypy/changeset/1151bfc2e7f9/ Log: merge diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) def test_gmtime(self): import time as rctime diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -126,10 +126,7 @@ return W_ComplexObject(w_int.intval, 0.0) def delegate_Long2Complex(space, w_long): - try: - dval = w_long.tofloat() - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + dval = w_long.tofloat(space) return W_ComplexObject(dval, 0.0) def delegate_Float2Complex(space, w_float): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,11 +62,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - try: - return W_FloatObject(w_longobj.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return W_FloatObject(w_longobj.tofloat(space)) # float__Float is supposed to do nothing, unless it has diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -26,8 +26,12 @@ def longval(self): return self.num.tolong() - def tofloat(self): - return self.num.tofloat() + def tofloat(self, space): + try: + return self.num.tofloat() + except OverflowError: + raise 
OperationError(space.w_OverflowError, + space.wrap("long int too large to convert to float")) def toint(self): return self.num.toint() @@ -66,7 +70,7 @@ return w_self.num def float_w(self, space): - return self.num.tofloat() + return self.tofloat(space) def int(self, space): if (type(self) is not W_LongObject and @@ -124,11 +128,7 @@ return long__Long(space, w_value) def float__Long(space, w_longobj): - try: - return space.newfloat(w_longobj.num.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return space.newfloat(w_longobj.tofloat(space)) def repr__Long(space, w_long): return space.wrap(w_long.num.repr()) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -18,6 +18,12 @@ w_obj = fromlong(42) assert space.unwrap(w_obj) == 42 + def test_overflow_error(self): + space = self.space + fromlong = lobj.W_LongObject.fromlong + w_big = fromlong(10**900) + space.raises_w(space.w_OverflowError, space.float_w, w_big) + def test_rint_variants(self): py.test.skip("XXX broken!") from rpython.rtyper.tool.rfficache import platform From noreply at buildbot.pypy.org Fri Jul 12 00:13:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 12 Jul 2013 00:13:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix. (no cookies :-) Message-ID: <20130711221358.617431C02E4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65359:4229b0f4399f Date: 2013-07-12 00:13 +0200 http://bitbucket.org/pypy/pypy/changeset/4229b0f4399f/ Log: Test and fix. (no cookies :-) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1050,7 +1050,7 @@ else: # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" data_w = space.listview(space.call_function(w_method)) - update1_keys(space, w_dict, data_w) + update1_keys(space, w_dict, w_data, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1074,7 +1074,7 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, data_w): +def update1_keys(space, w_dict, w_data, data_w): for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -367,6 +367,16 @@ d.update({'foo': 'bar'}, baz=1) assert d == {'foo': 'bar', 'baz': 1} + def test_update_keys_method(self): + class Foo(object): + def keys(self): + return [4, 1] + def __getitem__(self, key): + return key * 10 + d = {} + d.update(Foo()) + assert d == {1: 10, 4: 40} + def test_values(self): d = {1: 2, 3: 4} vals = d.values() From noreply at buildbot.pypy.org Fri Jul 12 01:20:47 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 01:20:47 +0200 (CEST) Subject: [pypy-commit] pypy default: A failing (xfail) test for VirtualizableAnalyzer with indirect construction of an object. 
Message-ID: <20130711232047.4845D1C356B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65360:d1430383faa5 Date: 2013-07-12 09:19 +1000 http://bitbucket.org/pypy/pypy/changeset/d1430383faa5/ Log: A failing (xfail) test for VirtualizableAnalyzer with indirect construction of an object. diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,15 +1,22 @@ +import pytest + +from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, + EffectInfo, VirtualizableAnalyzer) +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.rclass import OBJECT -from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.ootypesystem import ootype -from rpython.jit.codewriter.effectinfo import effectinfo_from_writeanalyze,\ - EffectInfo +from rpython.translator.translator import TranslationContext, graphof -class FakeCPU: + +class FakeCPU(object): def fielddescrof(self, T, fieldname): return ('fielddescr', T, fieldname) + def arraydescrof(self, A): return ('arraydescr', A) + def test_no_oopspec_duplicate(): # check that all the various EffectInfo.OS_* have unique values oopspecs = set() @@ -18,6 +25,7 @@ assert value not in oopspecs oopspecs.add(value) + def test_include_read_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) @@ -26,6 +34,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) @@ -34,6 +43,7 @@ assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) @@ -43,6 +53,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) @@ -51,6 +62,7 @@ assert not effectinfo.write_descrs_fields assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + def test_dont_include_read_and_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a"), @@ -60,6 +72,7 @@ assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] assert not effectinfo.write_descrs_arrays + def test_dont_include_read_and_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A)), @@ -78,6 +91,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -85,6 +99,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -92,6 +107,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_ooarray_of_void(): effects = frozenset([("array", ootype.Array(ootype.Void))]) 
effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -99,9 +115,44 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_instance_with_void(): effects = frozenset([("struct", ootype.Instance("x", ootype.ROOT, {"a": ootype.Void}), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + + +class TestVirtualizableAnalyzer(object): + def analyze(self, func, sig): + t = TranslationContext() + t.buildannotator().build_types(func, sig) + t.buildrtyper().specialize() + fgraph = graphof(t, func) + return VirtualizableAnalyzer(t).analyze(fgraph.startblock.operations[0]) + + @pytest.mark.xfail + def test_constructor(self): + class A(object): + x = 1 + + class B(A): + x = 2 + + @jit.elidable + def g(cls): + return cls() + + def f(x): + if x: + cls = A + else: + cls = B + return g(cls).x + + def entry(x): + return f(x) + + res = self.analyze(entry, [int]) + assert not res From noreply at buildbot.pypy.org Fri Jul 12 01:20:49 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 01:20:49 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130711232049.0329C1C356B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65361:4f246f62b2ef Date: 2013-07-12 09:19 +1000 http://bitbucket.org/pypy/pypy/changeset/4f246f62b2ef/ Log: merged upstream diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,7 @@ .. branch: improve-str2charp Improve the performance of I/O writing up to 15% by using memcpy instead of copying char-by-char in str2charp and get_nonmovingbuffer + +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -473,7 +473,7 @@ option_ptr = rffi.cast(rffi.INTP, value_ptr) option_ptr[0] = space.int_w(w_option) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) + w_onoff, w_time, w_interval = space.unpackiterable(w_option, 3) option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) option_ptr.c_onoff = space.uint_w(w_onoff) option_ptr.c_keepalivetime = space.uint_w(w_time) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -74,7 +74,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.str0_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -679,6 +679,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + class TestAbi: def test_abi_tag(self): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -318,7 +318,7 @@ if not base.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape,]) else: - w_fldname, w_flddesc = space.fixedview(w_elem) + w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), 
w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) if fldname in fields: diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) def test_gmtime(self): import time as rctime diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -126,10 +126,7 @@ return W_ComplexObject(w_int.intval, 0.0) def delegate_Long2Complex(space, w_long): - try: - dval = w_long.tofloat() - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + dval = w_long.tofloat(space) return W_ComplexObject(dval, 0.0) def delegate_Float2Complex(space, w_float): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1039,15 +1039,18 @@ def update1(space, w_dict, w_data): - if space.findattr(w_data, space.wrap("keys")) is None: + if isinstance(w_data, W_DictMultiObject): # optimization case only + update1_dict_dict(space, w_dict, w_data) + return + w_method = space.findattr(w_data, space.wrap("keys")) + if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - update1_pairs(space, w_dict, w_data) + data_w = space.listview(w_data) + update1_pairs(space, w_dict, data_w) else: - if isinstance(w_data, W_DictMultiObject): # optimization case only - update1_dict_dict(space, w_dict, w_data) - else: - # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - update1_keys(space, w_dict, w_data) + # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" + data_w = space.listview(space.call_function(w_method)) + update1_keys(space, w_dict, w_data, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1061,8 +1064,8 @@ w_dict.setitem(w_key, w_value) -def update1_pairs(space, w_dict, w_data): - for w_pair in space.listview(w_data): +def update1_pairs(space, w_dict, data_w): + for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: raise OperationError(space.w_ValueError, @@ -1071,9 +1074,8 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, w_data): - w_keys = space.call_method(w_data, "keys") - for w_key in space.listview(w_keys): +def update1_keys(space, w_dict, w_data, data_w): + for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,11 +62,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - try: - return W_FloatObject(w_longobj.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return W_FloatObject(w_longobj.tofloat(space)) # float__Float is supposed to do nothing, unless it has diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -26,8 +26,12 @@ def longval(self): return self.num.tolong() - def tofloat(self): - return self.num.tofloat() + def tofloat(self, space): + try: + 
return self.num.tofloat() + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("long int too large to convert to float")) def toint(self): return self.num.toint() @@ -66,7 +70,7 @@ return w_self.num def float_w(self, space): - return self.num.tofloat() + return self.tofloat(space) def int(self, space): if (type(self) is not W_LongObject and @@ -124,11 +128,7 @@ return long__Long(space, w_value) def float__Long(space, w_longobj): - try: - return space.newfloat(w_longobj.num.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return space.newfloat(w_longobj.tofloat(space)) def repr__Long(space, w_long): return space.wrap(w_long.num.repr()) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -180,8 +180,7 @@ if not space.isinstance_w(w_other, space.w_set): return space.w_False - # XXX there is no test_buildinshortcut.py - # tested in test_buildinshortcut.py + # tested in test_builtinshortcut.py # XXX do not make new setobject here w_other_as_set = self._newobj(space, w_other) return space.wrap(self.equals(w_other_as_set)) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -367,6 +367,16 @@ d.update({'foo': 'bar'}, baz=1) assert d == {'foo': 'bar', 'baz': 1} + def test_update_keys_method(self): + class Foo(object): + def keys(self): + return [4, 1] + def __getitem__(self, key): + return key * 10 + d = {} + d.update(Foo()) + assert d == {1: 10, 4: 40} + def test_values(self): d = {1: 2, 3: 4} vals = d.values() diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -18,6 +18,12 @@ w_obj = fromlong(42) assert space.unwrap(w_obj) == 42 + def test_overflow_error(self): + space = self.space + fromlong = lobj.W_LongObject.fromlong + w_big = fromlong(10**900) + space.raises_w(space.w_OverflowError, space.float_w, w_big) + def test_rint_variants(self): py.test.skip("XXX broken!") from rpython.rtyper.tool.rfficache import platform From noreply at buildbot.pypy.org Fri Jul 12 04:30:20 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 04:30:20 +0200 (CEST) Subject: [pypy-commit] pypy precise-instantiate: Try to generate an indirect call with known graphs for instantiate on a ClassesPBC. Message-ID: <20130712023020.643C61C02E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: precise-instantiate Changeset: r65362:73b667a788ed Date: 2013-07-12 12:29 +1000 http://bitbucket.org/pypy/pypy/changeset/73b667a788ed/ Log: Try to generate an indirect call with known graphs for instantiate on a ClassesPBC. 
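
	The gain from emitting an indirect_call that carries the list of
	candidate graphs, rather than a direct call to the generic
	ll_instantiate helper, is that call-graph analyzers can treat the
	instantiation as a call with a small, known set of targets. A rough
	sketch of that idea in plain Python (analyze_call and analyze_graph
	are invented for illustration only, not RPython API):

	    def analyze_call(candidate_graphs, analyze_graph):
	        # With a known candidate list, the result is just the union
	        # of the effects of each possible target; with no list, an
	        # analyzer has to assume the worst about an indirect call.
	        effects = set()
	        for graph in candidate_graphs:
	            effects |= analyze_graph(graph)
	        return effects
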
diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -132,7 +132,6 @@ fgraph = graphof(t, func) return VirtualizableAnalyzer(t).analyze(fgraph.startblock.operations[0]) - @pytest.mark.xfail def test_constructor(self): class A(object): x = 1 diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -394,9 +394,12 @@ # no __init__ here, AbstractClassesPBCRepr.__init__ is good enough def _instantiate_runtime_class(self, hop, vtypeptr, r_instance): - from rpython.rtyper.lltypesystem.rbuiltin import ll_instantiate - v_inst1 = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst1], resulttype = r_instance) + v_instantiate = hop.genop('getfield', [vtypeptr, hop.inputconst(Void, "instantiate")], resulttype=vtypeptr.concretetype.TO.instantiate) + possible_graphs = hop.inputconst(Void, + [desc.getclassdef(None).my_instantiate_graph for desc in self.s_pbc.descriptions] + ) + v_inst = hop.genop('indirect_call', [v_instantiate, possible_graphs], resulttype=vtypeptr.concretetype.TO.instantiate.TO.RESULT) + return hop.genop('cast_pointer', [v_inst], resulttype=r_instance) def getlowleveltype(self): return rclass.CLASSTYPE From noreply at buildbot.pypy.org Fri Jul 12 05:03:11 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 05:03:11 +0200 (CEST) Subject: [pypy-commit] pypy default: typo fix Message-ID: <20130712030311.960711C3585@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65363:ecfd8b3da646 Date: 2013-07-12 13:02 +1000 http://bitbucket.org/pypy/pypy/changeset/ecfd8b3da646/ Log: typo fix diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,7 +740,7 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) - def test_union_type_some_opbc(self): + def test_union_type_some_pbc(self): class A(object): name = "A" From noreply at buildbot.pypy.org Fri Jul 12 05:22:49 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 05:22:49 +0200 (CEST) Subject: [pypy-commit] pypy precise-instantiate: some import and dead clean up removal Message-ID: <20130712032249.89ED51C02E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: precise-instantiate Changeset: r65364:c71041db17e0 Date: 2013-07-12 13:14 +1000 http://bitbucket.org/pypy/pypy/changeset/c71041db17e0/ Log: some import and dead clean up removal diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -1,25 +1,20 @@ import types -import sys -from rpython.tool.pairtype import pairtype, pair -from rpython.annotator import model as annmodel -from rpython.annotator import description -from rpython.flowspace.model import Constant, Variable -from rpython.rtyper.lltypesystem.lltype import \ - typeOf, Void, ForwardReference, Struct, Bool, Char, \ - Ptr, malloc, nullptr, Array, Signed, FuncType -from rpython.rtyper.rmodel import Repr, TyperError, inputconst, inputdesc -from rpython.rtyper.rpbc import samesig,\ - commonbase, allattributenames, adjust_shape, \ - AbstractClassesPBCRepr, 
AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, \ - AbstractMultipleFrozenPBCRepr, MethodOfFrozenPBCRepr, \ - AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, \ - SingleFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable + +from rpython.annotator import description, model as annmodel +from rpython.rlib.debug import ll_assert +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper import callparse from rpython.rtyper.lltypesystem import rclass, llmemory -from rpython.tool.sourcetools import has_varargs -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.debug import ll_assert +from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, + Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) +from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, + OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, + AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, + SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, + get_concrete_calltable) +from rpython.tool.pairtype import pairtype -from rpython.rtyper import callparse def rtype_is_None(robj1, rnone2, hop, pos=0): if isinstance(robj1.lowleveltype, Ptr): @@ -41,6 +36,7 @@ else: raise TyperError('rtype_is_None of %r' % (robj1)) + # ____________________________________________________________ class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): @@ -67,7 +63,7 @@ mangled_name, r_value = self.fieldmap[attr] cmangledname = inputconst(Void, mangled_name) return llops.genop('getfield', [vpbc, cmangledname], - resulttype = r_value) + resulttype=r_value) class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): @@ -86,6 +82,7 @@ def null_instance(self): return llmemory.Address._defl() + class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr), pairtype(MultipleUnrelatedFrozenPBCRepr, @@ -100,11 +97,13 @@ vlist = hop.inputargs(r, r) return hop.genop('adr_eq', vlist, resulttype=Bool) + class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr)): def convert_from_to((robj1, robj2), v, llops): return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) + # ____________________________________________________________ class FunctionsPBCRepr(AbstractFunctionsPBCRepr): @@ -123,6 +122,7 @@ def get_specfunc_row(self, llop, v, c_rowname, resulttype): return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) + class SmallFunctionSetPBCRepr(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper @@ -252,15 +252,6 @@ return hop.genop('char_ne', [v1, inputconst(Char, '\000')], resulttype=Bool) -## def rtype_simple_call(self, hop): -## v_index = hop.inputarg(self, arg=0) -## v_ptr = hop.llops.convertvar(v_index, self, self.pointer_repr) -## hop2 = hop.copy() -## hop2.args_r[0] = self.pointer_repr -## hop2.args_v[0] = v_ptr -## return hop2.dispatch() - -## rtype_call_args = rtype_simple_call class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): @@ -273,6 +264,7 @@ return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], resulttype=r_ptr.lowleveltype) + def compression_function(r_set): if r_set._compression_function is None: table = [] @@ -280,6 +272,7 @@ table.append((chr(i), p)) last_c, last_p = table[-1] unroll_table = unrolling_iterable(table[:-1]) + def ll_compress(fnptr): for c, p 
in unroll_table: if fnptr == p: @@ -290,6 +283,7 @@ r_set._compression_function = ll_compress return r_set._compression_function + class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_ptr, r_set), v, llops): if r_ptr.lowleveltype is Void: @@ -299,6 +293,7 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: return r_from._conversion_tables[r_to] @@ -320,7 +315,6 @@ r_from._conversion_tables[r_to] = r return r -## myf = open('convlog.txt', 'w') class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_from, r_to), v, llops): @@ -343,6 +337,7 @@ else: return v + class MethodsPBCRepr(AbstractMethodsPBCRepr): """Representation selected for a PBC of the form {func: classdef...}. It assumes that all the methods come from the same name in a base @@ -418,17 +413,3 @@ return 0 else: return cls.hash - -# ____________________________________________________________ - -##def rtype_call_memo(hop): -## memo_table = hop.args_v[0].value -## if memo_table.s_result.is_constant(): -## return hop.inputconst(hop.r_result, memo_table.s_result.const) -## fieldname = memo_table.fieldname -## assert hop.nb_args == 2, "XXX" - -## r_pbc = hop.args_r[1] -## assert isinstance(r_pbc, (MultipleFrozenPBCRepr, ClassesPBCRepr)) -## v_table, v_pbc = hop.inputargs(Void, r_pbc) -## return r_pbc.getfield(v_pbc, fieldname, hop.llops) From noreply at buildbot.pypy.org Fri Jul 12 05:22:50 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 05:22:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Replace a dict with a set. Message-ID: <20130712032250.D30B71C02E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65365:4dd7f1d50603 Date: 2013-07-12 13:22 +1000 http://bitbucket.org/pypy/pypy/changeset/4dd7f1d50603/ Log: Replace a dict with a set. 
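
	The change below is a small cleanup: collect_called_graphs only ever
	used the dict for membership (every value stored was True), which is
	exactly what a set expresses. A rough illustration of the pattern,
	not of the actual code:

	    # before: a dict abused as a set
	    seen = {}
	    for graph in ("g1", "g2", "g1"):
	        seen[graph] = True

	    # after: a set says the same thing directly
	    seen = set()
	    for graph in ("g1", "g2", "g1"):
	        seen.add(graph)

	    assert seen == set(["g1", "g2"])
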
diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -32,22 +32,22 @@ def collect_called_graphs(graph, translator, include_oosend=True): - graphs_or_something = {} + graphs_or_something = set() for block in graph.iterblocks(): for op in block.operations: if op.opname == "direct_call": graph = get_graph(op.args[0], translator) if graph is not None: - graphs_or_something[graph] = True + graphs_or_something.add(graph) else: - graphs_or_something[op.args[0]] = True + graphs_or_something.add(op.args[0]) if op.opname == "indirect_call": graphs = op.args[-1].value if graphs is None: - graphs_or_something[op.args[0]] = True + graphs_or_something.add(op.args[0]) else: for graph in graphs: - graphs_or_something[graph] = True + graphs_or_something.add(graph) if op.opname == 'oosend' and include_oosend: meth = get_meth_from_oosend(op) if hasattr(meth, 'graph'): @@ -56,7 +56,7 @@ key = CanRaise(meth._can_raise) else: key = op.args[0] - graphs_or_something[key] = True + graphs_or_something.add(key) return graphs_or_something def iter_callsites(graph, calling_what): From noreply at buildbot.pypy.org Fri Jul 12 07:21:51 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 07:21:51 +0200 (CEST) Subject: [pypy-commit] pypy precise-instantiate: rewrite these tests so they now pass Message-ID: <20130712052151.A1A001C02E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: precise-instantiate Changeset: r65366:0facbce0dd89 Date: 2013-07-12 15:21 +1000 http://bitbucket.org/pypy/pypy/changeset/0facbce0dd89/ Log: rewrite these tests so they now pass diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -13,6 +13,7 @@ self.ARGS = ARGS self.RESULT = RESULT self.effectinfo = effectinfo + def get_extra_info(self): return self.effectinfo @@ -37,7 +38,7 @@ class tracker: pass - + calldescrof = FakeCallDescr fielddescrof = FakeFieldDescr sizeof = FakeSizeDescr @@ -121,20 +122,32 @@ blackholeinterp.run() assert blackholeinterp.get_tmpreg_i() == 100+6+5+4+3 + def test_instantiate(): - class A1: id = 651 - class A2(A1): id = 652 - class B1: id = 661 - class B2(B1): id = 662 + class A1: + id = 651 + + class A2(A1): + id = 652 + + class B1: + id = 661 + + class B2(B1): + id = 662 + def dont_look(n): return n + 1 + + classes = [ + (A1, B1), + (A2, B2) + ] + def f(n): - if n > 5: - x, y = A1, B1 - else: - x, y = A2, B2 + x, y = classes[n] return x().id + y().id + dont_look(n) - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [0]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(FakePolicy()) @@ -149,16 +162,10 @@ else: assert 0, "missing instantiate_*_%s in:\n%r" % (expected, names) - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'B1' in names - assert 'A2' in names - assert 'B2' in names + names = set([value for key, value in cw.assembler.list_of_addr2name]) assert 'dont_look' in names + def test_instantiate_with_unreasonable_attr(): # It is possible to have in real code the instantiate() function for # a class be dont-look-inside. 
This is caused by the code that @@ -169,17 +176,19 @@ name = graph.name return not (name.startswith('instantiate_') and name.endswith('A2')) + class A1: pass + class A2(A1): pass + + classes = [A1, A2] + def f(n): - if n > 5: - x = A1 - else: - x = A2 + x = classes[n] x() - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [1]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(MyFakePolicy()) @@ -188,12 +197,7 @@ names = [jitcode.name for jitcode in cw.assembler.indirectcalltargets] assert len(names) == 1 assert names[0].startswith('instantiate_') and names[0].endswith('A1') - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'A2' in names + def test_int_abs(): def f(n): @@ -209,7 +213,7 @@ def test_raw_malloc_and_access(): TP = rffi.CArray(lltype.Signed) - + def f(n): a = lltype.malloc(TP, n, flavor='raw') a[0] = n From noreply at buildbot.pypy.org Fri Jul 12 07:57:23 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 07:57:23 +0200 (CEST) Subject: [pypy-commit] pypy precise-instantiate: Close branch for merge. Message-ID: <20130712055723.C7BC31C304B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: precise-instantiate Changeset: r65367:120376465290 Date: 2013-07-12 15:55 +1000 http://bitbucket.org/pypy/pypy/changeset/120376465290/ Log: Close branch for merge. From noreply at buildbot.pypy.org Fri Jul 12 07:57:24 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 07:57:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged precise-instantiate: Message-ID: <20130712055724.F1FC01C3050@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65368:0bcbff2e5aca Date: 2013-07-12 15:56 +1000 http://bitbucket.org/pypy/pypy/changeset/0bcbff2e5aca/ Log: Merged precise-instantiate: Makes it so the analyzers are able to see which "instantiate" functions can actually be called when cls() could be any one of several classes. 
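
	The sort of RPython code this helps with is instantiation through a
	variable holding one of several classes. A minimal plain-Python
	sketch, echoing the rewritten test in the diff below (A1/A2 and the
	id values are taken from that test):

	    class A1(object):
	        id = 651

	    class A2(A1):
	        id = 652

	    classes = [A1, A2]

	    def f(n):
	        # 'cls' can only be A1 or A2 here, so the analyzers now see
	        # exactly two possible instantiate targets instead of an
	        # opaque indirect call.
	        cls = classes[n]
	        return cls().id

	    assert f(0) == 651
	    assert f(1) == 652
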
diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -13,6 +13,7 @@ self.ARGS = ARGS self.RESULT = RESULT self.effectinfo = effectinfo + def get_extra_info(self): return self.effectinfo @@ -37,7 +38,7 @@ class tracker: pass - + calldescrof = FakeCallDescr fielddescrof = FakeFieldDescr sizeof = FakeSizeDescr @@ -121,20 +122,32 @@ blackholeinterp.run() assert blackholeinterp.get_tmpreg_i() == 100+6+5+4+3 + def test_instantiate(): - class A1: id = 651 - class A2(A1): id = 652 - class B1: id = 661 - class B2(B1): id = 662 + class A1: + id = 651 + + class A2(A1): + id = 652 + + class B1: + id = 661 + + class B2(B1): + id = 662 + def dont_look(n): return n + 1 + + classes = [ + (A1, B1), + (A2, B2) + ] + def f(n): - if n > 5: - x, y = A1, B1 - else: - x, y = A2, B2 + x, y = classes[n] return x().id + y().id + dont_look(n) - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [0]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(FakePolicy()) @@ -149,16 +162,10 @@ else: assert 0, "missing instantiate_*_%s in:\n%r" % (expected, names) - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'B1' in names - assert 'A2' in names - assert 'B2' in names + names = set([value for key, value in cw.assembler.list_of_addr2name]) assert 'dont_look' in names + def test_instantiate_with_unreasonable_attr(): # It is possible to have in real code the instantiate() function for # a class be dont-look-inside. This is caused by the code that @@ -169,17 +176,19 @@ name = graph.name return not (name.startswith('instantiate_') and name.endswith('A2')) + class A1: pass + class A2(A1): pass + + classes = [A1, A2] + def f(n): - if n > 5: - x = A1 - else: - x = A2 + x = classes[n] x() - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [1]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(MyFakePolicy()) @@ -188,12 +197,7 @@ names = [jitcode.name for jitcode in cw.assembler.indirectcalltargets] assert len(names) == 1 assert names[0].startswith('instantiate_') and names[0].endswith('A1') - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'A2' in names + def test_int_abs(): def f(n): @@ -209,7 +213,7 @@ def test_raw_malloc_and_access(): TP = rffi.CArray(lltype.Signed) - + def f(n): a = lltype.malloc(TP, n, flavor='raw') a[0] = n diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -132,7 +132,6 @@ fgraph = graphof(t, func) return VirtualizableAnalyzer(t).analyze(fgraph.startblock.operations[0]) - @pytest.mark.xfail def test_constructor(self): class A(object): x = 1 diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -1,25 +1,20 @@ import types -import sys -from rpython.tool.pairtype import pairtype, pair -from rpython.annotator import model as annmodel -from rpython.annotator 
import description -from rpython.flowspace.model import Constant, Variable -from rpython.rtyper.lltypesystem.lltype import \ - typeOf, Void, ForwardReference, Struct, Bool, Char, \ - Ptr, malloc, nullptr, Array, Signed, FuncType -from rpython.rtyper.rmodel import Repr, TyperError, inputconst, inputdesc -from rpython.rtyper.rpbc import samesig,\ - commonbase, allattributenames, adjust_shape, \ - AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, \ - AbstractMultipleFrozenPBCRepr, MethodOfFrozenPBCRepr, \ - AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, \ - SingleFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable + +from rpython.annotator import description, model as annmodel +from rpython.rlib.debug import ll_assert +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper import callparse from rpython.rtyper.lltypesystem import rclass, llmemory -from rpython.tool.sourcetools import has_varargs -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.debug import ll_assert +from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, + Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) +from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, + OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, + AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, + SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, + get_concrete_calltable) +from rpython.tool.pairtype import pairtype -from rpython.rtyper import callparse def rtype_is_None(robj1, rnone2, hop, pos=0): if isinstance(robj1.lowleveltype, Ptr): @@ -41,6 +36,7 @@ else: raise TyperError('rtype_is_None of %r' % (robj1)) + # ____________________________________________________________ class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): @@ -67,7 +63,7 @@ mangled_name, r_value = self.fieldmap[attr] cmangledname = inputconst(Void, mangled_name) return llops.genop('getfield', [vpbc, cmangledname], - resulttype = r_value) + resulttype=r_value) class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): @@ -86,6 +82,7 @@ def null_instance(self): return llmemory.Address._defl() + class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr), pairtype(MultipleUnrelatedFrozenPBCRepr, @@ -100,11 +97,13 @@ vlist = hop.inputargs(r, r) return hop.genop('adr_eq', vlist, resulttype=Bool) + class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr)): def convert_from_to((robj1, robj2), v, llops): return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) + # ____________________________________________________________ class FunctionsPBCRepr(AbstractFunctionsPBCRepr): @@ -123,6 +122,7 @@ def get_specfunc_row(self, llop, v, c_rowname, resulttype): return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) + class SmallFunctionSetPBCRepr(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper @@ -252,15 +252,6 @@ return hop.genop('char_ne', [v1, inputconst(Char, '\000')], resulttype=Bool) -## def rtype_simple_call(self, hop): -## v_index = hop.inputarg(self, arg=0) -## v_ptr = hop.llops.convertvar(v_index, self, self.pointer_repr) -## hop2 = hop.copy() -## hop2.args_r[0] = self.pointer_repr -## hop2.args_v[0] = v_ptr -## return hop2.dispatch() - -## rtype_call_args = rtype_simple_call class __extend__(pairtype(SmallFunctionSetPBCRepr, 
FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): @@ -273,6 +264,7 @@ return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], resulttype=r_ptr.lowleveltype) + def compression_function(r_set): if r_set._compression_function is None: table = [] @@ -280,6 +272,7 @@ table.append((chr(i), p)) last_c, last_p = table[-1] unroll_table = unrolling_iterable(table[:-1]) + def ll_compress(fnptr): for c, p in unroll_table: if fnptr == p: @@ -290,6 +283,7 @@ r_set._compression_function = ll_compress return r_set._compression_function + class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_ptr, r_set), v, llops): if r_ptr.lowleveltype is Void: @@ -299,6 +293,7 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: return r_from._conversion_tables[r_to] @@ -320,7 +315,6 @@ r_from._conversion_tables[r_to] = r return r -## myf = open('convlog.txt', 'w') class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_from, r_to), v, llops): @@ -343,6 +337,7 @@ else: return v + class MethodsPBCRepr(AbstractMethodsPBCRepr): """Representation selected for a PBC of the form {func: classdef...}. It assumes that all the methods come from the same name in a base @@ -394,9 +389,12 @@ # no __init__ here, AbstractClassesPBCRepr.__init__ is good enough def _instantiate_runtime_class(self, hop, vtypeptr, r_instance): - from rpython.rtyper.lltypesystem.rbuiltin import ll_instantiate - v_inst1 = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst1], resulttype = r_instance) + v_instantiate = hop.genop('getfield', [vtypeptr, hop.inputconst(Void, "instantiate")], resulttype=vtypeptr.concretetype.TO.instantiate) + possible_graphs = hop.inputconst(Void, + [desc.getclassdef(None).my_instantiate_graph for desc in self.s_pbc.descriptions] + ) + v_inst = hop.genop('indirect_call', [v_instantiate, possible_graphs], resulttype=vtypeptr.concretetype.TO.instantiate.TO.RESULT) + return hop.genop('cast_pointer', [v_inst], resulttype=r_instance) def getlowleveltype(self): return rclass.CLASSTYPE @@ -415,17 +413,3 @@ return 0 else: return cls.hash - -# ____________________________________________________________ - -##def rtype_call_memo(hop): -## memo_table = hop.args_v[0].value -## if memo_table.s_result.is_constant(): -## return hop.inputconst(hop.r_result, memo_table.s_result.const) -## fieldname = memo_table.fieldname -## assert hop.nb_args == 2, "XXX" - -## r_pbc = hop.args_r[1] -## assert isinstance(r_pbc, (MultipleFrozenPBCRepr, ClassesPBCRepr)) -## v_table, v_pbc = hop.inputargs(Void, r_pbc) -## return r_pbc.getfield(v_pbc, fieldname, hop.llops) From noreply at buildbot.pypy.org Fri Jul 12 08:16:27 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 08:16:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Document a missed optimization Message-ID: <20130712061627.31F491C02E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r4983:6afbc6bbdc37 Date: 2013-07-12 16:16 +1000 http://bitbucket.org/pypy/extradoc/changeset/6afbc6bbdc37/ Log: Document a missed optimization diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -83,6 +83,15 @@ - p0 = call_pure(ConstClass(something), ConstPtr(2)) guard_exception(SomeException) +- f0 = convert_longlong_bytes_to_float(i0) + setarrayitem_gc(p0, 0, 
f0, descr=) + + This should be folded into: + + setarrayitem_gc(p0, 0, i0, descr=) + + (This applies to the read direction as well) + PYTHON EXAMPLES --------------- From noreply at buildbot.pypy.org Fri Jul 12 08:37:51 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 12 Jul 2013 08:37:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove some code that was temporary. Message-ID: <20130712063751.22DDF1C304B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65369:c1825a7b729a Date: 2013-07-12 16:37 +1000 http://bitbucket.org/pypy/pypy/changeset/c1825a7b729a/ Log: Remove some code that was temporary. diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -23,7 +23,6 @@ supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode supports_singlefloats = not detect_hardfloat() - can_inline_varsize_malloc = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -21,8 +21,6 @@ class AbstractLLCPU(AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts - can_inline_varsize_malloc = False - def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): assert type(opts) is not bool diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -305,8 +305,6 @@ arraydescr, kind=FLAG_ARRAY): """ itemsize is an int, v_length and v_result are boxes """ - if not self.cpu.can_inline_varsize_malloc: - return False # temporary, kill when ARM supports it gc_descr = self.gc_ll_descr if (kind == FLAG_ARRAY and (arraydescr.basesize != gc_descr.standard_array_basesize or diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -109,8 +109,6 @@ class BaseFakeCPU(object): JITFRAME_FIXED_SIZE = 0 - can_inline_varsize_malloc = True - def __init__(self): self.tracker = FakeTracker() self._cache = {} diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -23,8 +23,6 @@ with_threads = False frame_reg = regloc.ebp - can_inline_varsize_malloc = True - from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE all_reg_indexes = gpr_reg_mgr_cls.all_reg_indexes gen_regs = gpr_reg_mgr_cls.all_regs From noreply at buildbot.pypy.org Fri Jul 12 11:06:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 12 Jul 2013 11:06:31 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add links Message-ID: <20130712090631.11CB71C02E4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4984:67ba462afa67 Date: 2013-07-12 10:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/67ba462afa67/ Log: add links diff --git a/blog/draft/duhton.rst b/blog/draft/duhton.rst --- a/blog/draft/duhton.rst +++ b/blog/draft/duhton.rst @@ -23,9 +23,7 @@ there are no conflicting writes to global memory and hence the demos are very 
amenable to parallelization. They exercise: -* arithmetics - ``demo/many_sqare_roots.duh``:: - - +* arithmetics - ``demo/many_sqare_roots.duh`` * read-only access to globals - ``demo/trees.duh`` @@ -62,3 +60,9 @@ Cheers, fijal on behalf of Remi Meier and Armin Rigo + +.. _`the previous blog post`: http://morepypy.blogspot.com/2013/06/stm-on-drawing-board.html +.. _`lisp-like/scheme-like interpreter`: https://bitbucket.org/arigo/duhton +.. _`the stmgc repo`: https://bitbucket.org/pypy/stmgc +.. _`the duhton repo`: https://bitbucket.org/arigo/duhton + From noreply at buildbot.pypy.org Fri Jul 12 11:06:32 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 12 Jul 2013 11:06:32 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130712090632.65F741C02E4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4985:fbfe3d8fe09a Date: 2013-07-12 11:06 +0200 http://bitbucket.org/pypy/extradoc/changeset/fbfe3d8fe09a/ Log: merge diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -83,6 +83,15 @@ - p0 = call_pure(ConstClass(something), ConstPtr(2)) guard_exception(SomeException) +- f0 = convert_longlong_bytes_to_float(i0) + setarrayitem_gc(p0, 0, f0, descr=) + + This should be folded into: + + setarrayitem_gc(p0, 0, i0, descr=) + + (This applies to the read direction as well) + PYTHON EXAMPLES --------------- From noreply at buildbot.pypy.org Fri Jul 12 13:31:51 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 12 Jul 2013 13:31:51 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Start a branch about conditional calls. implement a basic one-arg test and Message-ID: <20130712113151.AEF181C0306@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65370:ac2a867e421e Date: 2013-07-12 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/ac2a867e421e/ Log: Start a branch about conditional calls. 
implement a basic one-arg test and the llgraph backend diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -286,7 +286,7 @@ def get_savedata_ref(self, deadframe): assert deadframe._saved_data is not None return deadframe._saved_data - + # ------------------------------------------------------------ def calldescrof(self, FUNC, ARGS, RESULT, effect_info): @@ -334,7 +334,7 @@ except KeyError: descr = InteriorFieldDescr(A, fieldname) self.descrs[key] = descr - return descr + return descr def _calldescr_dynamic_for_tests(self, atypes, rtype, abiname='FFI_DEFAULT_ABI'): @@ -802,7 +802,7 @@ else: ovf = False self.overflow_flag = ovf - return z + return z def execute_guard_no_overflow(self, descr): if self.overflow_flag: @@ -821,6 +821,11 @@ x = math.sqrt(y) return support.cast_to_floatstorage(x) + def execute_cond_call(self, calldescr, cond, func, *args): + if not cond: + return + self.execute_call(calldescr, func, *args) + def execute_call(self, calldescr, func, *args): effectinfo = calldescr.get_extra_info() if effectinfo is not None and hasattr(effectinfo, 'oopspecindex'): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2266,6 +2266,40 @@ value |= 32768 assert s.data.tid == value + def test_cond_call(self): + called = [] + + def func_void(arg): + called.append(arg) + + FUNC = self.FuncType([lltype.Signed], lltype.Void) + func_ptr = llhelper(lltype.Ptr(FUNC), func_void) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, f0, f1] + cond_call(i1, ConstClass(func_ptr), i2, descr=calldescr) + guard_false(i0, descr=faildescr) [i1, i2, i3, i4, i5, i6, f0, f1] + ''' + loop = parse(ops, namespace={'faildescr': BasicFailDescr(), + 'func_ptr': func_ptr, + 'calldescr': calldescr}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, 1.2, 3.4) + assert not called + for i in range(5): + assert self.cpu.get_int_value(frame, i) == i + assert self.cpu.get_float_value(frame, 6) == 1.2 + assert self.cpu.get_float_value(frame, 7) == 3.4 + frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) + assert called == [1] + for i in range(4): + assert self.cpu.get_int_value(frame, i + 1) == i + 1 + assert self.cpu.get_float_value(frame, 6) == 1.2 + assert self.cpu.get_float_value(frame, 7) == 3.4 + def test_force_operations_returning_void(self): values = [] def maybe_force(token, flag): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -149,6 +149,22 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) + def _build_call_slowpath(self, no_args): + """ This builds a general call slowpath, for whatever call happens to + come. 
+ """ + mc = codebuf.MachineCodeBlockWrapper() + self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats, + callee_only=False) + assert no_args == 1 + mc.SUB(esp, imm(WORD)) + # first arg is always in edi + mc.CALL() + mc.ADD(esp, imm(WORD)) + self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, + callee_only=False) + mc.RET() + def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. The arguments are passed in eax and edi, as follows: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -518,6 +518,7 @@ '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', 'CALL/*d', + 'COND_CALL/*d', # a conditional call, with first argument as a condition 'CALL_ASSEMBLER/*d', # call already compiled assembler 'CALL_MAY_FORCE/*d', 'CALL_LOOPINVARIANT/*d', From noreply at buildbot.pypy.org Fri Jul 12 15:01:14 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 12 Jul 2013 15:01:14 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: pass the first (fastpath) of the test Message-ID: <20130712130114.178F21C02E4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65371:09e06e2eb839 Date: 2013-07-12 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/09e06e2eb839/ Log: pass the first (fastpath) of the test diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -824,6 +824,7 @@ def execute_cond_call(self, calldescr, cond, func, *args): if not cond: return + # cond_call can't have a return value self.execute_call(calldescr, func, *args) def execute_call(self, calldescr, func, *args): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -106,6 +106,7 @@ kind='unicode') else: self.malloc_slowpath_unicode = None + self.cond_call_slowpath = [0, self._build_cond_call_slowpath(1)] self._build_stack_check_slowpath() self._build_release_gil(gc_ll_descr.gcrootmap) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2293,6 +2293,7 @@ assert self.cpu.get_int_value(frame, i) == i assert self.cpu.get_float_value(frame, 6) == 1.2 assert self.cpu.get_float_value(frame, 7) == 3.4 + xxx frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) assert called == [1] for i in range(4): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -149,7 +149,7 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) - def _build_call_slowpath(self, no_args): + def _build_cond_call_slowpath(self, no_args): """ This builds a general call slowpath, for whatever call happens to come. 
""" @@ -159,11 +159,12 @@ assert no_args == 1 mc.SUB(esp, imm(WORD)) # first arg is always in edi - mc.CALL() + mc.CALL(imm(0)) mc.ADD(esp, imm(WORD)) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, callee_only=False) mc.RET() + return 0 def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. @@ -2140,6 +2141,17 @@ def label(self): self._check_frame_depth_debug(self.mc) + def cond_call(self, op, gcmap, cond_loc, call_loc, arglocs): + self.mc.CMP(cond_loc, cond_loc) + self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later + jmp_adr = self.mc.get_relative_pos() + self.push_gcmap(self.mc, gcmap, mov=True) + self.mc.CALL(imm(self.cond_call_slowpath[len(arglocs)])) + # never any result value + offset = self.mc.get_relative_pos() - jmp_adr + assert 0 < offset <= 127 + self.mc.overwrite(jmp_adr-1, chr(offset)) + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -797,6 +797,16 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb + def consider_cond_call(self, op): + assert op.result is None + args = op.getarglist() + assert len(args) == 1 + 2 + self.make_sure_var_in_reg(args[2], selected_reg=edi) + loc_cond = self.make_sure_var_in_reg(args[0], args) + loc_call = self.make_sure_var_in_reg(args[1], args) + self.assembler.cond_call(op, self.get_gcmap(), loc_cond, loc_call, + [edi]) + def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -346,6 +346,7 @@ rop.CALL_ASSEMBLER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, + rop.COND_CALL, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, From noreply at buildbot.pypy.org Fri Jul 12 15:19:01 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 12 Jul 2013 15:19:01 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: woot, make the first test pass Message-ID: <20130712131901.E1CEA1C0306@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65372:5085a0267092 Date: 2013-07-12 15:18 +0200 http://bitbucket.org/pypy/pypy/changeset/5085a0267092/ Log: woot, make the first test pass diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2293,7 +2293,6 @@ assert self.cpu.get_int_value(frame, i) == i assert self.cpu.get_float_value(frame, 6) == 1.2 assert self.cpu.get_float_value(frame, 7) == 3.4 - xxx frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) assert called == [1] for i in range(4): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -159,12 +159,12 @@ assert no_args == 1 mc.SUB(esp, imm(WORD)) # first arg is always in edi - mc.CALL(imm(0)) + mc.CALL(eax) mc.ADD(esp, imm(WORD)) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, callee_only=False) mc.RET() - return 0 + return mc.materialize(self.cpu.asmmemmgr, []) def 
_build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. @@ -2142,7 +2142,7 @@ self._check_frame_depth_debug(self.mc) def cond_call(self, op, gcmap, cond_loc, call_loc, arglocs): - self.mc.CMP(cond_loc, cond_loc) + self.mc.TEST(cond_loc, cond_loc) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() self.push_gcmap(self.mc, gcmap, mov=True) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -802,8 +802,8 @@ args = op.getarglist() assert len(args) == 1 + 2 self.make_sure_var_in_reg(args[2], selected_reg=edi) + loc_call = self.make_sure_var_in_reg(args[1], args, selected_reg=eax) loc_cond = self.make_sure_var_in_reg(args[0], args) - loc_call = self.make_sure_var_in_reg(args[1], args) self.assembler.cond_call(op, self.get_gcmap(), loc_cond, loc_call, [edi]) From noreply at buildbot.pypy.org Fri Jul 12 17:59:44 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 12 Jul 2013 17:59:44 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: break less stuff Message-ID: <20130712155944.C13141C0306@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65373:de492c4ceb35 Date: 2013-07-12 07:47 +0200 http://bitbucket.org/pypy/pypy/changeset/de492c4ceb35/ Log: break less stuff diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -271,7 +271,7 @@ return rffi.cast(lltype.Signed, rst_addr) class GcRootMap_stm(object): - is_shadow_stack = False # XXX: should it have an is_stmgc? + is_shadow_stack = True # XXX: should it have an is_stmgc? 
def __init__(self, gcdescr): pass @@ -289,9 +289,6 @@ self.llop1 = gc_ll_descr.llop1 self.returns_modified_object = False - self.FUNCPTR = lltype.Ptr(lltype.FuncType( - [llmemory.Address], lltype.Void)) - self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR @@ -305,6 +302,8 @@ def __init__(self, gc_ll_descr): BarrierDescr.__init__(self, gc_ll_descr) self.fielddescr_tid = gc_ll_descr.fielddescr_tid + self.FUNCPTR = lltype.Ptr(lltype.FuncType( + [llmemory.Address], lltype.Void)) GCClass = gc_ll_descr.GCClass self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG @@ -347,7 +346,7 @@ def get_barrier_funcptr(self, returns_modified_object): assert not returns_modified_object FUNCTYPE = self.FUNCPTR - return llop1.get_write_barrier_failing_case(FUNCTYPE) + return self.llop1.get_write_barrier_failing_case(FUNCTYPE) def get_write_barrier_fn(self, cpu, returns_modified_object): # must pass in 'self.returns_modified_object', to make sure that @@ -383,7 +382,7 @@ # get a pointer to the 'remember_young_pointer' function from # the GC, and call it immediately funcptr = self.get_barrier_funcptr(returns_modified_object) - res = funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) + funcptr(llmemory.cast_ptr_to_adr(gcref_struct)) class STMBarrierDescr(BarrierDescr): @@ -391,13 +390,13 @@ BarrierDescr.__init__(self, gc_ll_descr) self.stmcat = stmcat self.returns_modified_object = True - self.WB_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( + self.B_FUNCPTR_MOD = lltype.Ptr(lltype.FuncType( [llmemory.Address], llmemory.Address)) - self.wb_failing_case_ptr = rffi.llexternal( + self.b_failing_case_ptr = rffi.llexternal( cfunc_name, - self.WB_FUNCPTR_MOD.TO.ARGS, - self.WB_FUNCPTR_MOD.TO.RESULT, + self.B_FUNCPTR_MOD.TO.ARGS, + self.B_FUNCPTR_MOD.TO.RESULT, sandboxsafe=True, _nowrapper=True) @@ -409,7 +408,7 @@ def get_barrier_funcptr(self, returns_modified_object): assert returns_modified_object - return self.wb_failing_case_ptr + return self.b_failing_case_ptr @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): From noreply at buildbot.pypy.org Fri Jul 12 17:59:46 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 12 Jul 2013 17:59:46 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: remove R2W for now; introduce is_stm on GcRootMap Message-ID: <20130712155946.053E01C0306@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65374:add171e74fe2 Date: 2013-07-12 08:29 +0200 http://bitbucket.org/pypy/pypy/changeset/add171e74fe2/ Log: remove R2W for now; introduce is_stm on GcRootMap diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -250,6 +250,7 @@ class GcRootMap_asmgcc(object): is_shadow_stack = False + is_stm = False def __init__(self, gcdescr): pass @@ -259,7 +260,8 @@ class GcRootMap_shadowstack(object): is_shadow_stack = True - + is_stm = False + def __init__(self, gcdescr): pass @@ -271,8 +273,9 @@ return rffi.cast(lltype.Signed, rst_addr) class GcRootMap_stm(object): - is_shadow_stack = True # XXX: should it have an is_stmgc? 
- + is_shadow_stack = True + is_stm = True + def __init__(self, gcdescr): pass @@ -535,7 +538,6 @@ def _setup_barriers_for_stm(self): self.P2Rdescr = STMReadBarrierDescr(self, 'P2R') self.P2Wdescr = STMWriteBarrierDescr(self, 'P2W') - self.R2Wdescr = STMWriteBarrierDescr(self, 'R2W') self.write_barrier_descr = "wbdescr: do not use" # @specialize.argtype(0) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -34,7 +34,7 @@ 'P': {'R': self.gc_ll_descr.P2Rdescr, 'W': self.gc_ll_descr.P2Wdescr, }, - 'R': {'W': self.gc_ll_descr.R2Wdescr, + 'R': {'W': self.gc_ll_descr.P2Wdescr, }, 'W': {}, } diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -250,7 +250,7 @@ cond_call_stm_b(p1, descr=P2Rdescr) i1 = getfield_gc(p1, descr=tydescr) i2 = int_add(i1, 1) - cond_call_stm_b(p1, descr=R2Wdescr) + cond_call_stm_b(p1, descr=P2Wdescr) setfield_gc(p1, i2, descr=tydescr) jump(p1) """) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1020,12 +1020,16 @@ def _reload_frame_if_necessary(self, mc, align_stack=False): gcrootmap = self.cpu.gc_ll_descr.gcrootmap - if gcrootmap: - if gcrootmap.is_shadow_stack: - rst = gcrootmap.get_root_stack_top_addr() - mc.MOV(ecx, heap(rst)) - mc.MOV(ebp, mem(ecx, -WORD)) - wbdescr = self.cpu.gc_ll_descr.write_barrier_descr + if gcrootmap and gcrootmap.is_shadow_stack: + rst = gcrootmap.get_root_stack_top_addr() + mc.MOV(ecx, heap(rst)) + mc.MOV(ebp, mem(ecx, -WORD)) + + if gcrootmap and gcrootmap.is_stm: + wbdescr = self.cpu.gc_ll_descr.P2Wdescr + else: + wbdescr = self.cpu.gc_ll_descr.write_barrier_descr + if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not # an array From noreply at buildbot.pypy.org Fri Jul 12 17:59:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 12 Jul 2013 17:59:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: progress on stm barriers (without fastpath) and GC without malloc fastpaths (nursery) Message-ID: <20130712155947.4CC241C0306@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65375:21bde4788254 Date: 2013-07-12 16:47 +0200 http://bitbucket.org/pypy/pypy/changeset/21bde4788254/ Log: progress on stm barriers (without fastpath) and GC without malloc fastpaths (nursery) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -81,27 +81,34 @@ self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) self._build_failure_recovery(False, withfloats=False) self._build_failure_recovery(True, withfloats=False) - self._build_wb_slowpath(False) - self._build_wb_slowpath(True) - self._build_wb_slowpath(False, for_frame=True) + if gc_ll_descr.stm: + descrs = [gc_ll_descr.P2Rdescr, gc_ll_descr.P2Wdescr] + else: + descrs = [gc_ll_descr.write_barrier_descr] + for d in descrs: + self._build_b_slowpath(d, False) + self._build_b_slowpath(d, True) + self._build_b_slowpath(d, False, for_frame=True) # only one of those self.build_frame_realloc_slowpath() if self.cpu.supports_floats: 
self._build_failure_recovery(False, withfloats=True) self._build_failure_recovery(True, withfloats=True) - self._build_wb_slowpath(False, withfloats=True) - self._build_wb_slowpath(True, withfloats=True) + for d in descrs: + self._build_b_slowpath(d, False, withfloats=True) + self._build_b_slowpath(d, True, withfloats=True) self._build_propagate_exception_path() + if gc_ll_descr.get_malloc_slowpath_addr() is not None: # generate few slowpaths for various cases self.malloc_slowpath = self._build_malloc_slowpath(kind='fixed') self.malloc_slowpath_varsize = self._build_malloc_slowpath( kind='var') - if hasattr(gc_ll_descr, 'malloc_str'): + if gc_ll_descr.get_malloc_slowpath_addr() is not None and hasattr(gc_ll_descr, 'malloc_str'): self.malloc_slowpath_str = self._build_malloc_slowpath(kind='str') else: self.malloc_slowpath_str = None - if hasattr(gc_ll_descr, 'malloc_unicode'): + if gc_ll_descr.get_malloc_slowpath_addr() is not None and hasattr(gc_ll_descr, 'malloc_unicode'): self.malloc_slowpath_unicode = self._build_malloc_slowpath( kind='unicode') else: diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -294,12 +294,42 @@ self.returns_modified_object = False self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR + self.b_slowpath = [0, 0, 0, 0] def repr_of_descr(self): raise NotImplementedError def __repr(self): raise NotImplementedError + + def get_b_slowpath(self, num): + return self.b_slowpath[num] + + def set_b_slowpath(self, num, addr): + self.b_slowpath[num] = addr + + def get_barrier_funcptr(self, returns_modified_object): + raise NotImplementedError + + def get_barrier_fn(self, cpu, returns_modified_object): + # must pass in 'self.returns_modified_object', to make sure that + # the callers are fixed for this case + funcptr = self.get_barrier_funcptr(returns_modified_object) + funcaddr = llmemory.cast_ptr_to_adr(funcptr) + return cpu.cast_adr_to_int(funcaddr) + + def get_barrier_from_array_fn(self, cpu): + # returns a function with arguments [array, index, newvalue] + llop1 = self.llop1 + funcptr = llop1.get_write_barrier_from_array_failing_case( + self.FUNCPTR) + funcaddr = llmemory.cast_ptr_to_adr(funcptr) + return cpu.cast_adr_to_int(funcaddr) # this may return 0 + + def has_barrier_from_array(self, cpu): + return self.get_barrier_from_array_fn(cpu) != 0 + + class WriteBarrierDescr(BarrierDescr): def __init__(self, gc_ll_descr): @@ -325,8 +355,6 @@ assert self.jit_wb_cards_set_singlebyte == -0x80 else: self.jit_wb_cards_set = 0 - # - self.wb_slowpath = [0, 0, 0, 0] def repr_of_descr(self): return 'wbdescr' @@ -351,30 +379,6 @@ FUNCTYPE = self.FUNCPTR return self.llop1.get_write_barrier_failing_case(FUNCTYPE) - def get_write_barrier_fn(self, cpu, returns_modified_object): - # must pass in 'self.returns_modified_object', to make sure that - # the callers are fixed for this case - funcptr = self.get_barrier_funcptr(returns_modified_object) - funcaddr = llmemory.cast_ptr_to_adr(funcptr) - return cpu.cast_adr_to_int(funcaddr) - - def get_write_barrier_from_array_fn(self, cpu): - # returns a function with arguments [array, index, newvalue] - llop1 = self.llop1 - funcptr = llop1.get_write_barrier_from_array_failing_case( - self.FUNCPTR) - funcaddr = llmemory.cast_ptr_to_adr(funcptr) - return cpu.cast_adr_to_int(funcaddr) # this may return 0 - - def has_write_barrier_from_array(self, cpu): - return 
self.get_write_barrier_from_array_fn(cpu) != 0 - - def get_wb_slowpath(self, withcards, withfloats): - return self.wb_slowpath[withcards + 2 * withfloats] - - def set_wb_slowpath(self, withcards, withfloats, addr): - self.wb_slowpath[withcards + 2 * withfloats] = addr - @specialize.arg(2) def _do_barrier(self, gcref_struct, returns_modified_object): assert self.returns_modified_object == returns_modified_object @@ -431,7 +435,7 @@ class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): - assert stmcat in ['P2W', 'R2W'] + assert stmcat in ['P2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, 'stm_write_barrier') @@ -465,6 +469,8 @@ if not self.stm: # XXX: not needed with stm/shadowstack?? self._setup_tid() + else: + self.fielddescr_tid = None self._setup_write_barrier() self._setup_str() self._make_functions(really_not_translated) @@ -608,6 +614,7 @@ unicode_itemsize = self.unicode_descr.itemsize unicode_ofs_length = self.unicode_descr.lendescr.offset + def malloc_str(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, @@ -615,7 +622,7 @@ str_ofs_length) self.generate_function('malloc_str', malloc_str, [lltype.Signed]) - + def malloc_unicode(length): return llop1.do_malloc_varsize_clear( llmemory.GCREF, @@ -717,7 +724,7 @@ def can_use_nursery_malloc(self, size): return (self.max_size_of_young_obj is not None and size < self.max_size_of_young_obj) - + def has_write_barrier_class(self): return WriteBarrierDescr diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -240,7 +240,8 @@ mallocs. (For all I know this latter case never occurs in practice, but better safe than sorry.) """ - if self.gc_ll_descr.fielddescr_tid is not None: # framework GC + if self.gc_ll_descr.fielddescr_tid is not None \ + or self.gc_ll_descr.stm: # framework GC assert (size & (WORD-1)) == 0, "size not aligned?" addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] @@ -434,7 +435,7 @@ def gen_write_barrier_array(self, v_base, v_index, v_value): write_barrier_descr = self.gc_ll_descr.write_barrier_descr - if write_barrier_descr.has_write_barrier_from_array(self.cpu): + if write_barrier_descr.has_barrier_from_array(self.cpu): # If we know statically the length of 'v', and it is not too # big, then produce a regular write_barrier. If it's unknown or # too big, produce instead a write_barrier_from_array. 
diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -281,7 +281,7 @@ gcdescr = get_description(config_) self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None, really_not_translated=True) - self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + self.gc_ll_descr.write_barrier_descr.has_barrier_from_array = ( lambda cpu: True) # class FakeCPU(BaseFakeCPU): @@ -573,7 +573,7 @@ """) def test_write_barrier_before_array_without_from_array(self): - self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = ( + self.gc_ll_descr.write_barrier_descr.has_barrier_from_array = ( lambda cpu: False) self.check_rewrite(""" [p1, i2, p3] diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4245,11 +4245,11 @@ class WBDescrForTests(AbstractDescr): returns_modified_object = False - wb_slowpath = (0, 0, 0, 0) - def get_wb_slowpath(self, c1, c2): - return self.wb_slowpath[c1+2*c2] - def set_wb_slowpath(self, c1, c2, addr): + b_slowpath = (0, 0, 0, 0) + def get_b_slowpath(self, c1, c2): + return self.b_slowpath[c1+2*c2] + def set_b_slowpath(self, c1, c2, addr): i = c1+2*c2 - self.wb_slowpath = (self.wb_slowpath[:i] + (addr,) + - self.wb_slowpath[i+1:]) + self.b_slowpath = (self.b_slowpath[:i] + (addr,) + + self.b_slowpath[i+1:]) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -51,7 +51,6 @@ self.float_const_abs_addr = 0 self.malloc_slowpath = 0 self.malloc_slowpath_varsize = 0 - self.wb_slowpath = [0, 0, 0, 0, 0] self.setup_failure_recovery() self.datablockwrapper = None self.stack_check_slowpath = 0 @@ -310,17 +309,21 @@ rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.stack_check_slowpath = rawstart - def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): - descr = self.cpu.gc_ll_descr.write_barrier_descr + def _build_b_slowpath(self, descr, withcards, withfloats=False, + for_frame=False): + is_stm = self.cpu.gc_ll_descr.stm exc0, exc1 = None, None if descr is None: return + if not withcards: - func = descr.get_write_barrier_fn(self.cpu) + func = descr.get_barrier_fn(self.cpu, + returns_modified_object=is_stm) else: + assert not is_stm if descr.jit_wb_cards_set == 0: return - func = descr.get_write_barrier_from_array_fn(self.cpu) + func = descr.get_barrier_from_array_fn(self.cpu) if func == 0: return # @@ -362,11 +365,16 @@ self._store_and_reset_exception(mc, exc0, exc1) mc.CALL(imm(func)) - # + + if descr.returns_modified_object: + # new addr in eax, save in scratch reg + mc.PUSH_r(eax.value) + if withcards: # A final TEST8 before the RET, for the caller. Careful to # not follow this instruction with another one that changes # the status of the CPU flags! 
+ assert not is_stm if IS_X86_32: mc.MOV_rs(eax.value, 3*WORD) else: @@ -374,12 +382,14 @@ mc.TEST8(addr_add_const(eax, descr.jit_wb_if_flag_byteofs), imm(-0x80)) # - if not for_frame: if IS_X86_32: # ADD touches CPU flags mc.LEA_rs(esp.value, 2 * WORD) self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) + + if descr.returns_modified_object: + mc.POP_r(eax.value) mc.RET16_i(WORD) else: if IS_X86_32: @@ -390,13 +400,16 @@ mc.MOV(exc0, RawEspLoc(WORD * 5, REF)) mc.MOV(exc1, RawEspLoc(WORD * 6, INT)) mc.LEA_rs(esp.value, 7 * WORD) + + if descr.returns_modified_object: + mc.POP_r(eax.value) mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) if for_frame: - self.wb_slowpath[4] = rawstart + descr.set_b_slowpath(4, rawstart) else: - self.wb_slowpath[withcards + 2 * withfloats] = rawstart + descr.set_b_slowpath(withcards + 2 * withfloats, rawstart) def assemble_loop(self, loopname, inputargs, operations, looptoken, log): '''adds the following attributes to looptoken: @@ -1027,9 +1040,11 @@ if gcrootmap and gcrootmap.is_stm: wbdescr = self.cpu.gc_ll_descr.P2Wdescr - else: - wbdescr = self.cpu.gc_ll_descr.write_barrier_descr - + self._stm_barrier_fastpath(mc, wbdescr, [ebp], is_frame=True, + align_stack=align_stack) + return + + wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not # an array @@ -1980,13 +1995,43 @@ self.mc.overwrite(jmp_location - 1, chr(offset)) # ------------------- END CALL ASSEMBLER ----------------------- + def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, + align_stack=False): + assert self.cpu.gc_ll_descr.stm + from rpython.jit.backend.llsupport.gc import STMBarrierDescr + assert isinstance(descr, STMBarrierDescr) + assert descr.returns_modified_object + loc_base = arglocs[0] + assert isinstance(loc_base, RegLoc) + # Write only a CALL to the helper prepared in advance, passing it as + # argument the address of the structure we are writing into + # (the first argument to COND_CALL_GC_WB). + helper_num = 0 + if is_frame: + helper_num = 4 + elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: + helper_num += 2 + # + if not is_frame: + mc.PUSH(loc_base) + if is_frame and align_stack: + mc.SUB_ri(esp.value, 16 - WORD) # erase the return address + func = descr.get_b_slowpath(helper_num) + mc.CALL(imm(func)) + mc.MOV_rr(loc_base.value, eax.value) + if is_frame and align_stack: + mc.ADD_ri(esp.value, 16 - WORD) # erase the return address + + + def _write_barrier_fastpath(self, mc, descr, arglocs, array=False, is_frame=False, align_stack=False): # Write code equivalent to write_barrier() in the GC: it checks # a flag in the object at arglocs[0], and if set, it calls a # helper piece of assembler. The latter saves registers as needed # and call the function jit_remember_young_pointer() from the GC. 
+ assert not self.cpu.gc_ll_descr.stm if we_are_translated(): cls = self.cpu.gc_ll_descr.has_write_barrier_class() assert cls is not None and isinstance(descr, cls) @@ -2029,18 +2074,18 @@ helper_num = 4 elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: helper_num += 2 - if self.wb_slowpath[helper_num] == 0: # tests only + if descr.get_b_slowpath(helper_num) == 0: # tests only assert not we_are_translated() self.cpu.gc_ll_descr.write_barrier_descr = descr - self._build_wb_slowpath(card_marking, - bool(self._regalloc.xrm.reg_bindings)) - assert self.wb_slowpath[helper_num] != 0 + self._build_b_slowpath(descr, card_marking, + bool(self._regalloc.xrm.reg_bindings)) + assert descr.get_b_slowpath(helper_num) != 0 # if not is_frame: mc.PUSH(loc_base) if is_frame and align_stack: mc.SUB_ri(esp.value, 16 - WORD) # erase the return address - mc.CALL(imm(self.wb_slowpath[helper_num])) + mc.CALL(imm(descr.get_b_slowpath(helper_num))) if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address @@ -2105,6 +2150,9 @@ self._write_barrier_fastpath(self.mc, op.getdescr(), arglocs, array=True) + def genop_discard_cond_call_stm_b(self, op, arglocs): + self._stm_barrier_fastpath(self.mc, op.getdescr(), arglocs) + def not_implemented_op_discard(self, op, arglocs): not_implemented("not implemented operation: %s" % op.getopname()) @@ -2129,6 +2177,7 @@ self._check_frame_depth_debug(self.mc) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): + assert not self.cpu.gc_ll_descr.stm assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) self.mc.LEA_rm(edi.value, (eax.value, size)) @@ -2145,6 +2194,7 @@ def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, sizeloc, gcmap): + assert not self.cpu.gc_ll_descr.stm if sizeloc is eax: self.mc.MOV(edi, sizeloc) sizeloc = edi @@ -2167,6 +2217,7 @@ def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, arraydescr): + assert not self.cpu.gc_ll_descr.stm from rpython.jit.backend.llsupport.descr import ArrayDescr assert isinstance(arraydescr, ArrayDescr) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -798,6 +798,7 @@ self.perform_discard(op, arglocs) consider_cond_call_gc_wb_array = consider_cond_call_gc_wb + consider_cond_call_stm_b = consider_cond_call_gc_wb def consider_call_malloc_nursery(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr @@ -823,6 +824,10 @@ size, gcmap) def consider_call_malloc_nursery_varsize_frame(self, op): + gc_ll_descr = self.assembler.cpu.gc_ll_descr + assert gc_ll_descr.max_size_of_young_obj is not None + # ^^^ if this returns None, don't translate the rest of this function + # size_box = op.getarg(0) assert isinstance(size_box, BoxInt) # we cannot have a const here! 
# sizeloc must be in a register, but we can free it now @@ -845,6 +850,9 @@ def consider_call_malloc_nursery_varsize(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr + assert gc_ll_descr.max_size_of_young_obj is not None + # ^^^ if this returns None, don't translate the rest of this function + # if not hasattr(gc_ll_descr, 'max_size_of_young_obj'): raise Exception("unreachable code") # for boehm, this function should never be called diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -96,7 +96,7 @@ @classmethod def JIT_max_size_of_young_obj(cls): - return -1 # XXX: should not be used + return None @classmethod def JIT_minimal_size_in_nursery(cls): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -625,6 +625,7 @@ # 'no_collect' function can trigger collection import cStringIO err = cStringIO.StringIO() + import sys prev = sys.stdout try: sys.stdout = err diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -47,7 +47,7 @@ return self.gcdata.gc.gcheaderbuilder.header_of_object(obj) def gct_gc_adr_of_root_stack_top(self, hop): - hop.genop("stm_get_root_stack_top") + hop.genop("stm_get_root_stack_top", [], resultvar=hop.spaceop.result) def _gct_with_roots_pushed(self, hop): livevars = self.push_roots(hop) From noreply at buildbot.pypy.org Sun Jul 14 07:17:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 14 Jul 2013 07:17:31 +0200 (CEST) Subject: [pypy-commit] cffi default: Tweak the error message Message-ID: <20130714051731.8FBB51C3553@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1276:a120d2a4a9a5 Date: 2013-07-14 07:17 +0200 http://bitbucket.org/cffi/cffi/changeset/a120d2a4a9a5/ Log: Tweak the error message diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -500,8 +500,8 @@ self._partial_length = True return None # - raise api.FFIError("unsupported non-constant or " - "not immediately constant expression") + raise api.FFIError("unsupported expression: expected a " + "simple numeric constant") def _build_enum_type(self, explicit_name, decls): if decls is not None: From noreply at buildbot.pypy.org Sun Jul 14 13:58:38 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 13:58:38 +0200 (CEST) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: <20130714115838.EA53B1C336B@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: Changeset: r65376:d109ca05cdf6 Date: 2013-07-14 01:31 +0200 http://bitbucket.org/pypy/pypy/changeset/d109ca05cdf6/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,7 @@ .. branch: flowoperators Simplify rpython/flowspace/ code by using more metaprogramming. Create SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. 
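Regarding the package-tk entry just above: the new --without-tk switch belongs to the release packaging script, so a packaging run that skips building the CFFI Tk bindings would be invoked roughly like this (the script path and interpreter are assumed here for illustration, they are not taken from the diff):

    python pypy/tool/release/package.py --without-tk
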
From noreply at buildbot.pypy.org Sun Jul 14 15:28:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 14 Jul 2013 15:28:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Point to the FAQ entry more prominently Message-ID: <20130714132812.3469C1C101D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65377:18ed847a7c64 Date: 2013-07-14 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/18ed847a7c64/ Log: Point to the FAQ entry more prominently diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. _`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part From noreply at buildbot.pypy.org Sun Jul 14 18:44:07 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 18:44:07 +0200 (CEST) Subject: [pypy-commit] pypy distutils-cppldflags: ADD CPPFLAGS and LDFLAGS Message-ID: <20130714164407.90F411C3594@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: distutils-cppldflags Changeset: r65378:d13af1390dae Date: 2013-07-13 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/d13af1390dae/ Log: ADD CPPFLAGS and LDFLAGS diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Sun Jul 14 18:44:08 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 18:44:08 +0200 (CEST) Subject: [pypy-commit] pypy distutils-cppldflags: port CPython's implementation of customie_compiler, dont run split on env vars - maybe problematic, support LDSHARED, CPPFLAGS, CFLAGS andLDFLAGS Message-ID: <20130714164408.D93891C3594@cobra.cs.uni-duesseldorf.de> Author: Pawe? 
Piotr Przeradowski Branch: distutils-cppldflags Changeset: r65379:e52527f04d73 Date: 2013-07-14 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/e52527f04d73/ Log: port CPython's implementation of customie_compiler, dont run split on env vars - maybe problematic, support LDSHARED, CPPFLAGS, CFLAGS andLDFLAGS diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,9 +12,9 @@ import sys import os -import shlex from distutils.errors import DistutilsPlatformError +from distutils import log; log.set_verbosity(1) PREFIX = os.path.normpath(sys.prefix) @@ -66,6 +66,12 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' + global _config_vars _config_vars = g @@ -123,21 +129,34 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CPPFLAGS" in os.environ: - cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) - if "CFLAGS" in os.environ: - cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) - if "LDFLAGS" in os.environ: - ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Sun Jul 14 18:44:10 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 18:44:10 +0200 (CEST) Subject: [pypy-commit] pypy distutils-cppldflags: merge default Message-ID: <20130714164410.138F41C3594@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: distutils-cppldflags Changeset: r65380:0d25b03e335c Date: 2013-07-14 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/0d25b03e335c/ Log: merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,7 @@ .. branch: flowoperators Simplify rpython/flowspace/ code by using more metaprogramming. Create SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. 
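The customize_compiler() port in r65379 above means that, on Unix, extension builds under PyPy now pick up the same environment variables CPython honours. A small, hypothetical check of that behaviour (it assumes a Unix-like platform and the sysconfig_pypy module from this branch; it is not part of the commit):

    import os
    # these must be set before customize_compiler() is called
    os.environ['CFLAGS'] = '-O3'
    os.environ['LDFLAGS'] = '-L/opt/ssl/lib'

    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler

    cc = new_compiler()        # a UnixCCompiler on Linux
    customize_compiler(cc)     # merges CC/OPT/CFLAGS/CCSHARED/LDSHARED with the env vars
    print(cc.compiler_so)      # now contains -O3 followed by the CCSHARED flags
    print(cc.linker_so)        # now contains -L/opt/ssl/lib
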
From noreply at buildbot.pypy.org Sun Jul 14 18:44:11 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 18:44:11 +0200 (CEST) Subject: [pypy-commit] pypy distutils-cppldflags: whatsnew Message-ID: <20130714164411.31EBF1C3594@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: distutils-cppldflags Changeset: r65381:32ef954a6c26 Date: 2013-07-14 17:46 +0200 http://bitbucket.org/pypy/pypy/changeset/32ef954a6c26/ Log: whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -20,3 +20,7 @@ .. branch: package-tk Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. From noreply at buildbot.pypy.org Sun Jul 14 18:44:12 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 14 Jul 2013 18:44:12 +0200 (CEST) Subject: [pypy-commit] pypy distutils-cppldflags: oups, didnt intend to commit set_verbosity Message-ID: <20130714164412.50D031C3594@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: distutils-cppldflags Changeset: r65382:0c6eeae0316c Date: 2013-07-14 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/0c6eeae0316c/ Log: oups, didnt intend to commit set_verbosity diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -14,7 +14,6 @@ import os from distutils.errors import DistutilsPlatformError -from distutils import log; log.set_verbosity(1) PREFIX = os.path.normpath(sys.prefix) @@ -72,7 +71,6 @@ g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' g['LDSHARED'] = g['CC'] + ' -shared' - global _config_vars _config_vars = g From noreply at buildbot.pypy.org Sun Jul 14 18:44:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 14 Jul 2013 18:44:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in squeaky/pypy-ldflags/distutils-cppldflags (pull request #162) Message-ID: <20130714164413.7293D1C3596@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r65383:656d89fbb1de Date: 2013-07-14 18:43 +0200 http://bitbucket.org/pypy/pypy/changeset/656d89fbb1de/ Log: Merged in squeaky/pypy-ldflags/distutils-cppldflags (pull request #162) ADD CPPFLAGS and LDFLAGS diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -20,3 +20,7 @@ .. branch: package-tk Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. From noreply at buildbot.pypy.org Sun Jul 14 19:03:38 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:38 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: move tests around, add failing test for array creation Message-ID: <20130714170338.A20811C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65384:b1a1a9d56af0 Date: 2013-07-13 22:02 +0300 http://bitbucket.org/pypy/pypy/changeset/b1a1a9d56af0/ Log: move tests around, add failing test for array creation diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1459,43 +1459,6 @@ skip('not implemented yet') assert s.view('double') < 7e-323 - def test_subtype_view(self): - from numpypy import ndarray, array - class matrix(ndarray): - def __new__(subtype, data, dtype=None, copy=True): - if isinstance(data, matrix): - return data - return data.view(subtype) - a = array(range(5)) - b = matrix(a) - assert isinstance(b, matrix) - assert (b == a).all() - - def test_subtype_base(self): - from numpypy import ndarray, dtype - class C(ndarray): - def __new__(subtype, shape, dtype): - self = ndarray.__new__(subtype, shape, dtype) - self.id = 'subtype' - return self - a = C([2, 2], int) - assert isinstance(a, C) - assert isinstance(a, ndarray) - assert a.shape == (2, 2) - assert a.dtype is dtype(int) - assert a.id == 'subtype' - a = a.reshape(1, 4) - b = a.reshape(4, 1) - assert isinstance(b, C) - #make sure __new__ was not called - assert not getattr(b, 'id', None) - a.fill(3) - b = a[0] - assert isinstance(b, C) - assert (b == 3).all() - b[0]=100 - assert a[0,0] == 100 - def test_tolist_scalar(self): from numpypy import int32, bool_ x = int32(23) diff --git a/pypy/module/micronumpy/test/test_subtype.py 
b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -25,6 +25,55 @@ self.called_finalize = True return SubType ''') + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + a = a.reshape(1, 4) + b = a.reshape(4, 1) + assert isinstance(b, C) + #make sure __new__ was not called + assert not getattr(b, 'id', None) + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 + + def test_ndarray_from_iterable(self): + from numpypy import array + class Polynomial(object): + def __init__(self, coef): + self.coef = coef + def __iter__(self): + return iter(self.coef) + def __len__(self): + return len(self.coef) + a = array(Polynomial([1, 2, 3])) + + def test_subtype_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + assert isinstance(b, matrix) + assert (b == a).all() + + def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray import numpypy as np From noreply at buildbot.pypy.org Sun Jul 14 19:03:39 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:39 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: move and more correctly define an array(object) test, raise NotImplementedError where this fails Message-ID: <20130714170339.C40B91C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65385:0447d303da95 Date: 2013-07-14 00:28 +0300 http://bitbucket.org/pypy/pypy/changeset/0447d303da95/ Log: move and more correctly define an array(object) test, raise NotImplementedError where this fails diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -538,7 +538,13 @@ return current_guess if current_guess is complex_type: return complex_type - return interp_dtype.get_dtype_cache(space).w_float64dtype + if space.isinstance_w(w_obj, space.w_float): + return interp_dtype.get_dtype_cache(space).w_float64dtype + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype + raise operationerrfmt(space.w_NotImplementedError, + 'unable to create dtype from objects, ' '"%T" instance not supported', + w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -976,3 +976,16 @@ assert a[0] == 1 assert (a + a)[1] == 4 +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpypy import array + class Polynomial(object): + pass + try: + a = array(Polynomial()) + assert a.shape == () + except NotImplementedError, e: + if e.message.find('unable to create dtype from objects')>=0: + skip('creating ojbect dtype not supported 
yet') + + diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -50,17 +50,6 @@ b[0]=100 assert a[0,0] == 100 - def test_ndarray_from_iterable(self): - from numpypy import array - class Polynomial(object): - def __init__(self, coef): - self.coef = coef - def __iter__(self): - return iter(self.coef) - def __len__(self): - return len(self.coef) - a = array(Polynomial([1, 2, 3])) - def test_subtype_view(self): from numpypy import ndarray, array class matrix(ndarray): From noreply at buildbot.pypy.org Sun Jul 14 19:03:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:40 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fix test and implementation for where() Message-ID: <20130714170340.F2DAD1C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65386:d88d7304799b Date: 2013-07-14 00:34 +0300 http://bitbucket.org/pypy/pypy/changeset/d88d7304799b/ Log: fix test and implementation for where() diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(space, shape, dtype, w_subtype=arr) + out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -100,7 +100,10 @@ from numpypy import where, ones, zeros, array a = array([1, 2, 3, 0, -3]) v = a.view(self.NoNew) - assert False + b = where(array(v) > 0, ones(5), zeros(5)) + assert (b == [1, 1, 1, 0, 0]).all() + # where returns an ndarray irregardless of the subtype of v + assert not isinstance(b, self.NoNew) def test_sub_repeat(self): assert False From noreply at buildbot.pypy.org Sun Jul 14 19:03:42 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:42 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fix test_sub_repeat Message-ID: <20130714170342.381671C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65387:a17cf2b4ccad Date: 2013-07-14 00:41 +0300 http://bitbucket.org/pypy/pypy/changeset/a17cf2b4ccad/ Log: fix test_sub_repeat diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -106,7 +106,11 @@ assert not isinstance(b, self.NoNew) def test_sub_repeat(self): - assert False + from numpypy import repeat, array + a = self.SubType(array([[1, 2], [3, 4]])) + b = repeat(a, 3) + assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() + assert isinstance(b, self.SubType) def test_sub_flatiter(self): from numpypy import array From noreply at buildbot.pypy.org Sun Jul 14 19:03:43 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:43 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: match strange implementation of round() in numpy Message-ID: <20130714170343.61D451C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti 
Picus Branch: ndarray-subtype Changeset: r65388:8281a068e40f Date: 2013-07-14 00:54 +0300 http://bitbucket.org/pypy/pypy/changeset/8281a068e40f/ Log: match strange implementation of round() in numpy diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -589,6 +589,8 @@ else: calc_dtype = out.get_dtype() + if decimals == 0: + out = out.descr_view(space,space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -131,11 +131,11 @@ from numpypy import array a = array(range(10), dtype=float).view(self.NoNew) # numpy compatibility - b = a.round(decimal=0) + b = a.round(decimals=0) assert isinstance(b, self.NoNew) - b = a.round(decimal=1) + b = a.round(decimals=1) assert not isinstance(b, self.NoNew) - b = a.round(decimal=-1) + b = a.round(decimals=-1) assert not isinstance(b, self.NoNew) def test_sub_dot(self): From noreply at buildbot.pypy.org Sun Jul 14 19:03:44 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 14 Jul 2013 19:03:44 +0200 (CEST) Subject: [pypy-commit] pypy default: fix issue 1537 with numpypy __array_interface__ Message-ID: <20130714170344.90B391C35BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65389:eafa1bb50ded Date: 2013-07-14 20:02 +0300 http://bitbucket.org/pypy/pypy/changeset/eafa1bb50ded/ Log: fix issue 1537 with numpypy __array_interface__ diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange From noreply at buildbot.pypy.org Mon Jul 15 00:06:51 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 15 Jul 2013 00:06:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Write a whatsnew entry for my branch. Message-ID: <20130714220651.8227A1C101D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65390:9f02b5f8efe5 Date: 2013-07-14 15:06 -0700 http://bitbucket.org/pypy/pypy/changeset/9f02b5f8efe5/ Log: Write a whatsnew entry for my branch. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -24,3 +24,8 @@ .. branch: distutils-cppldflags Copy CPython's implementation of customize_compiler, dont call split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. 
branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. From noreply at buildbot.pypy.org Mon Jul 15 10:33:38 2013 From: noreply at buildbot.pypy.org (Ben Darnell) Date: Mon, 15 Jul 2013 10:33:38 +0200 (CEST) Subject: [pypy-commit] pypy ssl_moving_write_buffer: Add the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER flag. Message-ID: <20130715083338.8C26A1C039A@cobra.cs.uni-duesseldorf.de> Author: Ben Darnell Branch: ssl_moving_write_buffer Changeset: r65391:d78b1a28fe34 Date: 2013-07-13 10:32 -0400 http://bitbucket.org/pypy/pypy/changeset/d78b1a28fe34/ Log: Add the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER flag. This disables a sanity check in openssl that can cause problems when it is used in non-blocking mode and the GC causes the address of a str object to change (https://bugs.pypy.org/issue1238). diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. + libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -93,6 +93,7 @@ SSL_RECEIVED_SHUTDOWN = rffi_platform.ConstantInteger( "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") From noreply at buildbot.pypy.org Mon Jul 15 10:33:39 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 15 Jul 2013 10:33:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in bdarnell/pypy/ssl_moving_write_buffer (pull request #160) Message-ID: <20130715083339.D374F1C101D@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r65392:a1903b9725c4 Date: 2013-07-15 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a1903b9725c4/ Log: Merged in bdarnell/pypy/ssl_moving_write_buffer (pull request #160) Add the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER flag. This is the same code as http://hg.python.org/cpython/rev/60310223d075 . diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. 
+ libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -93,6 +93,7 @@ SSL_RECEIVED_SHUTDOWN = rffi_platform.ConstantInteger( "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") From noreply at buildbot.pypy.org Mon Jul 15 11:49:35 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:35 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: experimenting with a general "unpack_into" interface. still unclear whether Message-ID: <20130715094935.9958D1C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65393:8219b71c6184 Date: 2013-07-09 01:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8219b71c6184/ Log: experimenting with a general "unpack_into" interface. still unclear whether it's a good idea. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -4,8 +4,7 @@ from rpython.tool.uid import HUGEVAL_BYTES from rpython.rlib import jit, types from rpython.rlib.debug import make_sure_not_resized -from rpython.rlib.objectmodel import (we_are_translated, newlist_hint, - compute_unique_id) +from rpython.rlib.objectmodel import (we_are_translated, compute_unique_id) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint @@ -234,6 +233,18 @@ def __spacebind__(self, space): return self + def unpack_into(self, space, unpack_target): + w_iterator = space.iter(self) + while True: + # YYY jit driver + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + unpack_target.append(w_item) + def unwrap(self, space): """NOT_RPYTHON""" # _____ this code is here to support testing only _____ @@ -751,21 +762,20 @@ """Unpack an iterable into a real (interpreter-level) list. Raise an OperationError(w_ValueError) if the length is wrong.""" + from pypy.interpreter import unpack w_iterator = self.iter(w_iterable) if expected_length == -1: - # xxx special hack for speed - from pypy.interpreter.generator import GeneratorIterator - if isinstance(w_iterator, GeneratorIterator): - lst_w = [] - w_iterator.unpack_into(lst_w) - return lst_w - # /xxx - return self._unpackiterable_unknown_length(w_iterator, w_iterable) + unpack_target = unpack.InterpListUnpackTarget(self, w_iterable) + self.unpack_into(w_iterable, unpack_target) + return unpack_target.items_w else: lst_w = self._unpackiterable_known_length(w_iterator, expected_length) return lst_w[:] # make the resulting list resizable + def unpack_into(self, w_iterable, unpack_target): + w_iterable.unpack_into(self, unpack_target) + def iteriterable(self, w_iterable): return W_InterpIterable(self, w_iterable) @@ -773,26 +783,6 @@ """Unpack an iterable of unknown length into an interp-level list. """ - # If we can guess the expected length we can preallocate. 
- try: - items = newlist_hint(self.length_hint(w_iterable, 0)) - except MemoryError: - items = [] # it might have lied - - tp = self.type(w_iterator) - while True: - unpackiterable_driver.jit_merge_point(tp=tp, - w_iterator=w_iterator, - items=items) - try: - w_item = self.next(w_iterator) - except OperationError, e: - if not e.match(self, self.w_StopIteration): - raise - break # done - items.append(w_item) - # - return items @jit.dont_look_inside def _unpackiterable_known_length(self, w_iterator, expected_length): @@ -805,21 +795,10 @@ @jit.unroll_safe def _unpackiterable_known_length_jitlook(self, w_iterator, expected_length): - items = [None] * expected_length - idx = 0 - while True: - try: - w_item = self.next(w_iterator) - except OperationError, e: - if not e.match(self, self.w_StopIteration): - raise - break # done - if idx == expected_length: - raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - items[idx] = w_item - idx += 1 - if idx < expected_length: + from pypy.interpreter import unpack + unpack_target = unpack.FixedSizeUnpackTarget(self, w_iterable) + self.unpack_into(unpack_target) + if unpack_target.index < expected_length: if idx == 1: plural = "" else: @@ -827,7 +806,7 @@ raise operationerrfmt(self.w_ValueError, "need more than %d value%s to unpack", idx, plural) - return items + return unpack_target.items_w def unpackiterable_unroll(self, w_iterable, expected_length): # Like unpackiterable(), but for the cases where we have @@ -835,6 +814,7 @@ # Returns a fixed-size list. w_iterator = self.iter(w_iterable) assert expected_length != -1 + # YYY correct unrolling return self._unpackiterable_known_length_jitlook(w_iterator, expected_length) diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -177,7 +177,7 @@ reds=['self', 'frame', 'results'], name='unpack_into') - def unpack_into(self, results): + def unpack_into(self, space, results): """This is a hack for performance: runs the generator and collects all produced items in a list.""" # XXX copied and simplified version of send_ex() diff --git a/pypy/interpreter/unpack.py b/pypy/interpreter/unpack.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/unpack.py @@ -0,0 +1,34 @@ +from rpython.rlib.objectmodel import newlist_hint + +class UnpackTarget(object): + def __init__(self, space): + self.space = space + + def append(self, w_obj): + raise NotImplementedError("abstract base class") + + +class InterpListUnpackTarget(UnpackTarget): + def __init__(self, space, w_iterable): + self.space = space + try: + items_w = newlist_hint(self.space.length_hint(w_iterable, 0)) + except MemoryError: + items_w = [] # it might have lied + self.items_w = items_w + + def append(self, w_obj): + self.items_w.append(w_obj) + + +class FixedSizeUnpackTarget(UnpackTarget): + def __init__(self, space, expected_size): + self.items_w = [None] * expected_size + self.index = 0 + + def append(self, w_obj): + if self.index == len(self.items_w): + raise OperationError(self.w_ValueError, + self.wrap("too many values to unpack")) + items[self.index] = w_item + self.index += 1 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -14,7 +14,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import (WrappedDefault, unwrap_spec, applevel, interp2app) -from 
pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter import unpack from pypy.interpreter.signature import Signature from pypy.objspace.std import slicetype from pypy.objspace.std.floatobject import W_FloatObject @@ -100,36 +100,13 @@ return space.fromcache(ObjectListStrategy) +class ListUnpackTarget(unpack.UnpackTarget): + def __init__(self, space, w_list): + self.space = space + self.w_list = w_list -def _get_printable_location(w_type): - return ('list__do_extend_from_iterable [w_type=%s]' % - w_type.getname(w_type.space)) - - -_do_extend_jitdriver = jit.JitDriver( - name='list__do_extend_from_iterable', - greens=['w_type'], - reds=['i', 'w_iterator', 'w_list'], - get_printable_location=_get_printable_location) - -def _do_extend_from_iterable(space, w_list, w_iterable): - w_iterator = space.iter(w_iterable) - w_type = space.type(w_iterator) - i = 0 - while True: - _do_extend_jitdriver.jit_merge_point(w_type=w_type, - i=i, - w_iterator=w_iterator, - w_list=w_list) - try: - w_list.append(space.next(w_iterator)) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - i += 1 - return i - + def append(self, w_obj): + self.w_list.append(w_obj) def list_unroll_condition(w_list1, space, w_list2): return (jit.loop_unrolling_heuristic(w_list1, w_list1.length(), @@ -790,10 +767,8 @@ if type(w_any) is W_ListObject or (isinstance(w_any, W_ListObject) and self.space._uses_list_iter(w_any)): self._extend_from_list(w_list, w_any) - elif isinstance(w_any, GeneratorIterator): - w_any.unpack_into_w(w_list) - else: - self._extend_from_iterable(w_list, w_any) + return + self._extend_from_iterable(w_list, w_any) def _extend_from_list(self, w_list, w_other): raise NotImplementedError @@ -804,11 +779,12 @@ if length_hint: w_list._resize_hint(w_list.length() + length_hint) - extended = _do_extend_from_iterable(self.space, w_list, w_iterable) + self.space.unpack_into(w_iterable, ListUnpackTarget(self.space, w_list)) # cut back if the length hint was too large - if extended < length_hint: - w_list._resize_hint(w_list.length()) + length = w_list.length() + if length < length_hint: + w_list._resize_hint(length) def reverse(self, w_list): raise NotImplementedError diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter import gateway +from pypy.interpreter import gateway, unpack from pypy.interpreter.error import OperationError from pypy.interpreter.signature import Signature from pypy.interpreter.baseobjspace import W_Root @@ -1568,56 +1568,19 @@ w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) return - iterable_w = space.listview(w_iterable) + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + space.unpack_into(w_iterable, SetUnpackTarget(space, w_set)) - if len(iterable_w) == 0: - w_set.strategy = strategy = space.fromcache(EmptySetStrategy) - w_set.sstorage = strategy.get_empty_storage() - return +class SetUnpackTarget(unpack.UnpackTarget): + # YYY is unrolling correctly done? 
+ def __init__(self, space, w_set): + self.space = space + self.w_set = w_set - _pick_correct_strategy(space, w_set, iterable_w) + def append(self, w_obj): + self.w_set.add(w_obj) - at jit.look_inside_iff(lambda space, w_set, iterable_w: - jit.loop_unrolling_heuristic(iterable_w, len(iterable_w), UNROLL_CUTOFF)) -def _pick_correct_strategy(space, w_set, iterable_w): - # check for integers - for w_item in iterable_w: - if type(w_item) is not W_IntObject: - break - else: - w_set.strategy = space.fromcache(IntegerSetStrategy) - w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) - return - - # check for strings - for w_item in iterable_w: - if type(w_item) is not W_StringObject: - break - else: - w_set.strategy = space.fromcache(StringSetStrategy) - w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) - return - - # check for unicode - for w_item in iterable_w: - if type(w_item) is not W_UnicodeObject: - break - else: - w_set.strategy = space.fromcache(UnicodeSetStrategy) - w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) - return - - # check for compares by identity - for w_item in iterable_w: - if not space.type(w_item).compares_by_identity(): - break - else: - w_set.strategy = space.fromcache(IdentitySetStrategy) - w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) - return - - w_set.strategy = space.fromcache(ObjectSetStrategy) - w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) init_signature = Signature(['some_iterable'], None, None) init_defaults = [None] From noreply at buildbot.pypy.org Mon Jul 15 11:49:36 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:36 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: merge Message-ID: <20130715094936.ED9781C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65394:e66b006b2ecc Date: 2013-07-09 01:43 +0200 http://bitbucket.org/pypy/pypy/changeset/e66b006b2ecc/ Log: merge diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1039,15 +1039,18 @@ def update1(space, w_dict, w_data): - if space.findattr(w_data, space.wrap("keys")) is None: + if isinstance(w_data, W_DictMultiObject): # optimization case only + update1_dict_dict(space, w_dict, w_data) + return + w_method = space.findattr(w_data, space.wrap("keys")) + if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - update1_pairs(space, w_dict, w_data) + data_w = space.listview(w_data) + update1_pairs(space, w_dict, data_w) else: - if isinstance(w_data, W_DictMultiObject): # optimization case only - update1_dict_dict(space, w_dict, w_data) - else: - # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - update1_keys(space, w_dict, w_data) + # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" + data_w = space.listview(space.call_function(w_method)) + update1_keys(space, w_dict, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1061,8 +1064,8 @@ w_dict.setitem(w_key, w_value) -def update1_pairs(space, w_dict, w_data): - for w_pair in space.listview(w_data): +def update1_pairs(space, w_dict, data_w): + for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: raise OperationError(space.w_ValueError, @@ -1071,9 +1074,8 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, w_data): - w_keys = 
space.call_method(w_data, "keys") - for w_key in space.listview(w_keys): +def update1_keys(space, w_dict, data_w): + for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) From noreply at buildbot.pypy.org Mon Jul 15 11:49:38 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:38 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: use new interface in dicts as well Message-ID: <20130715094938.1E3671C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65395:e8fa23054252 Date: 2013-07-09 07:58 +0200 http://bitbucket.org/pypy/pypy/changeset/e8fa23054252/ Log: use new interface in dicts as well diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -4,6 +4,7 @@ WrappedDefault, applevel, interp2app, unwrap_spec) from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.signature import Signature +from pypy.interpreter import unpack from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate @@ -1045,12 +1046,13 @@ w_method = space.findattr(w_data, space.wrap("keys")) if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - data_w = space.listview(w_data) - update1_pairs(space, w_dict, data_w) + unpacker = PairDictUpdateUnpacker(space, w_dict) + w_iterable = w_data else: + unpacker = KeyDictUpdateUnpacker(space, w_dict) # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - data_w = space.listview(space.call_function(w_method)) - update1_keys(space, w_dict, data_w) + w_iterable = space.call_function(w_method) + space.unpack_into(w_iterable, unpacker) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1063,21 +1065,23 @@ break w_dict.setitem(w_key, w_value) +class PairDictUpdateUnpacker(unpack.UnpackTarget): + def __init__(self, space, w_dict): + self.space = space + self.w_dict = w_dict -def update1_pairs(space, w_dict, data_w): - for w_pair in data_w: - pair = space.fixedview(w_pair) + def append(self, w_pair): + pair = self.space.fixedview(w_pair) if len(pair) != 2: - raise OperationError(space.w_ValueError, - space.wrap("sequence of pairs expected")) + raise OperationError(self.space.w_ValueError, + self.space.wrap("sequence of pairs expected")) w_key, w_value = pair - w_dict.setitem(w_key, w_value) + self.w_dict.setitem(w_key, w_value) - -def update1_keys(space, w_dict, data_w): - for w_key in data_w: - w_value = space.getitem(w_data, w_key) - w_dict.setitem(w_key, w_value) +class KeyDictUpdateUnpacker(PairDictUpdateUnpacker): + def append(self, w_key): + w_value = self.space.getitem(w_data, w_key) + self.w_dict.setitem(w_key, w_value) init_signature = Signature(['seq_or_map'], None, 'kwargs') From noreply at buildbot.pypy.org Mon Jul 15 11:49:39 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:39 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: potential todo Message-ID: <20130715094939.90FF71C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65396:dce5e71147e8 Date: 2013-07-09 08:55 +0200 http://bitbucket.org/pypy/pypy/changeset/dce5e71147e8/ Log: potential todo diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -488,6 +488,7 @@ if 
isinstance(w_any, W_ListObject): self.setslice(start, step, slicelength, w_any) else: + # YYY use different interface here to prevent packing and unpacking? sequence_w = space.listview(w_any) w_other = W_ListObject(space, sequence_w) self.setslice(start, step, slicelength, w_other) From noreply at buildbot.pypy.org Mon Jul 15 11:49:40 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:40 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: reinstante jit driver support, some cleanups Message-ID: <20130715094940.B25371C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65397:052963cf1e59 Date: 2013-07-10 22:07 +0200 http://bitbucket.org/pypy/pypy/changeset/052963cf1e59/ Log: reinstante jit driver support, some cleanups diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -20,9 +20,6 @@ UINT_MAX_32_BITS = r_uint(4294967295) -unpackiterable_driver = jit.JitDriver(name='unpackiterable', - greens=['tp'], - reds=['items', 'w_iterator']) class W_Root(object): @@ -32,6 +29,9 @@ _settled_ = True user_overridden_class = False + # unpacking support + from pypy.interpreter.unpack import generic_unpack_into as unpack_into + def getdict(self, space): return None @@ -233,17 +233,6 @@ def __spacebind__(self, space): return self - def unpack_into(self, space, unpack_target): - w_iterator = space.iter(self) - while True: - # YYY jit driver - try: - w_item = space.next(w_iterator) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break # done - unpack_target.append(w_item) def unwrap(self, space): """NOT_RPYTHON""" @@ -763,41 +752,25 @@ Raise an OperationError(w_ValueError) if the length is wrong.""" from pypy.interpreter import unpack - w_iterator = self.iter(w_iterable) if expected_length == -1: unpack_target = unpack.InterpListUnpackTarget(self, w_iterable) self.unpack_into(w_iterable, unpack_target) return unpack_target.items_w else: - lst_w = self._unpackiterable_known_length(w_iterator, + lst_w = self._unpackiterable_known_length(w_iterable, expected_length) return lst_w[:] # make the resulting list resizable - def unpack_into(self, w_iterable, unpack_target): - w_iterable.unpack_into(self, unpack_target) + def unpack_into(self, w_iterable, unpack_target, unroll=False): + w_iterable.unpack_into(self, unpack_target, unroll) def iteriterable(self, w_iterable): return W_InterpIterable(self, w_iterable) - def _unpackiterable_unknown_length(self, w_iterator, w_iterable): - """Unpack an iterable of unknown length into an interp-level - list. - """ - - @jit.dont_look_inside - def _unpackiterable_known_length(self, w_iterator, expected_length): - # Unpack a known length list, without letting the JIT look inside. - # Implemented by just calling the @jit.unroll_safe version, but - # the JIT stopped looking inside already. 
- return self._unpackiterable_known_length_jitlook(w_iterator, - expected_length) - - @jit.unroll_safe - def _unpackiterable_known_length_jitlook(self, w_iterator, - expected_length): + def _unpackiterable_known_length(self, w_iterable, expected_length, unroll=False): from pypy.interpreter import unpack unpack_target = unpack.FixedSizeUnpackTarget(self, w_iterable) - self.unpack_into(unpack_target) + self.unpack_into(w_iterable, unpack_target, unroll=unroll) if unpack_target.index < expected_length: if idx == 1: plural = "" @@ -812,11 +785,9 @@ # Like unpackiterable(), but for the cases where we have # an expected_length and want to unroll when JITted. # Returns a fixed-size list. - w_iterator = self.iter(w_iterable) assert expected_length != -1 - # YYY correct unrolling - return self._unpackiterable_known_length_jitlook(w_iterator, - expected_length) + return self._unpackiterable_known_length( + w_iterable, expected_length, unroll=True) def length_hint(self, w_obj, default): """Return the length of an object, consulting its __length_hint__ diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -3,6 +3,9 @@ from pypy.interpreter.pyopcode import LoopBlock from rpython.rlib import jit +unpack_into_jitdriver = jit.JitDriver(greens=['pycode'], + reds=['self', 'frame', 'unpack_target'], + name='unpack_into') class GeneratorIterator(W_Root): "An iterator created by a generator." @@ -169,45 +172,35 @@ break block = block.previous - # Results can be either an RPython list of W_Root, or it can be an - # app-level W_ListObject, which also has an append() method, that's why we - # generate 2 versions of the function and 2 jit drivers. - def _create_unpack_into(): - jitdriver = jit.JitDriver(greens=['pycode'], - reds=['self', 'frame', 'results'], - name='unpack_into') - - def unpack_into(self, space, results): - """This is a hack for performance: runs the generator and collects - all produced items in a list.""" - # XXX copied and simplified version of send_ex() - space = self.space - if self.running: - raise OperationError(space.w_ValueError, - space.wrap('generator already executing')) - frame = self.frame - if frame is None: # already finished - return - self.running = True - try: - pycode = self.pycode - while True: - jitdriver.jit_merge_point(self=self, frame=frame, - results=results, pycode=pycode) - try: - w_result = frame.execute_frame(space.w_None) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - # if the frame is now marked as finished, it was RETURNed from - if frame.frame_finished_execution: - break - results.append(w_result) # YIELDed - finally: - frame.f_backref = jit.vref_None - self.running = False - self.frame = None - return unpack_into - unpack_into = _create_unpack_into() - unpack_into_w = _create_unpack_into() + def unpack_into(self, space, unpack_target, unroll=False): + """This is a hack for performance: runs the generator and collects + all produced items in a list.""" + # YYY stop ignoring unroll + # XXX copied and simplified version of send_ex() + space = self.space + if self.running: + raise OperationError(space.w_ValueError, + space.wrap('generator already executing')) + frame = self.frame + if frame is None: # already finished + return + self.running = True + try: + pycode = self.pycode + while True: + unpack_into_jitdriver.jit_merge_point(self=self, frame=frame, + unpack_target=unpack_target, pycode=pycode) + try: + w_result = 
frame.execute_frame(space.w_None) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + # if the frame is now marked as finished, it was RETURNed from + if frame.frame_finished_execution: + break + unpack_target.append(w_result) # YIELDed + finally: + frame.f_backref = jit.vref_None + self.running = False + self.frame = None diff --git a/pypy/interpreter/unpack.py b/pypy/interpreter/unpack.py --- a/pypy/interpreter/unpack.py +++ b/pypy/interpreter/unpack.py @@ -1,4 +1,7 @@ from rpython.rlib.objectmodel import newlist_hint +from rpython.rlib import jit + +from pypy.interpreter.error import OperationError class UnpackTarget(object): def __init__(self, space): @@ -30,5 +33,31 @@ if self.index == len(self.items_w): raise OperationError(self.w_ValueError, self.wrap("too many values to unpack")) - items[self.index] = w_item + self.items_w[self.index] = w_item self.index += 1 + + + +unpack_into_driver = jit.JitDriver(name='unpack_into', + greens=['unroll', 'w_type'], + reds=['unpack_target', 'w_iterator']) + +def generic_unpack_into(w_iterable, space, unpack_target, unroll=False): + w_iterator = space.iter(w_iterable) + w_type = space.type(w_iterator) + while True: + if not unroll: + unpack_into_driver.can_enter_jit(w_type=w_type, unroll=unroll, + w_iterator=w_iterator, + unpack_target=unpack_target) + unpack_into_driver.jit_merge_point(w_type=w_type, unroll=unroll, + w_iterator=w_iterator, + unpack_target=unpack_target) + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + unpack_target.append(w_item) + From noreply at buildbot.pypy.org Mon Jul 15 11:49:41 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:41 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: test tracing of unpacking independently of the rest of pypy. fix some of the Message-ID: <20130715094941.D2BDC1C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65398:4b0e328a1aa0 Date: 2013-07-11 19:58 +0200 http://bitbucket.org/pypy/pypy/changeset/4b0e328a1aa0/ Log: test tracing of unpacking independently of the rest of pypy. fix some of the bugs discovered while doing that. 
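        The new test exercises the UnpackTarget protocol against a tiny fake
        object space instead of the full PyPy one. As a rough plain-Python
        sketch of that protocol (ListTarget and simple_unpack_into below are
        invented names for illustration only; the real classes live in
        pypy/interpreter/unpack.py), the mechanism boils down to:

            class ListTarget(object):
                # stand-in for pypy.interpreter.unpack.UnpackTarget: the
                # unpacking loop pushes every produced item into append()
                def __init__(self):
                    self.items = []

                def append(self, item):
                    self.items.append(item)

            def simple_unpack_into(iterable, target):
                # non-JITted equivalent of generic_unpack_into(): loop over
                # the iterator until StopIteration and hand each value to
                # the target object
                it = iter(iterable)
                while True:
                    try:
                        item = next(it)
                    except StopIteration:
                        break
                    target.append(item)

            target = ListTarget()
            simple_unpack_into(range(5), target)
            assert target.items == [0, 1, 2, 3, 4]

        The JIT-driver and unrolling variants in the diff below do the same
        thing, with the iterator type and the target class used as green keys
        so the loop can be traced or unrolled.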
diff --git a/pypy/interpreter/unpack.py b/pypy/interpreter/unpack.py --- a/pypy/interpreter/unpack.py +++ b/pypy/interpreter/unpack.py @@ -26,33 +26,37 @@ class FixedSizeUnpackTarget(UnpackTarget): def __init__(self, space, expected_size): + self.space = space self.items_w = [None] * expected_size self.index = 0 def append(self, w_obj): if self.index == len(self.items_w): - raise OperationError(self.w_ValueError, - self.wrap("too many values to unpack")) - self.items_w[self.index] = w_item + raise OperationError(self.space.w_ValueError, + self.space.wrap("too many values to unpack")) + self.items_w[self.index] = w_obj self.index += 1 unpack_into_driver = jit.JitDriver(name='unpack_into', - greens=['unroll', 'w_type'], + greens=['unroll', 'unpackcls', 'w_type'], reds=['unpack_target', 'w_iterator']) def generic_unpack_into(w_iterable, space, unpack_target, unroll=False): w_iterator = space.iter(w_iterable) w_type = space.type(w_iterator) + unpackcls = type(unpack_target) while True: if not unroll: unpack_into_driver.can_enter_jit(w_type=w_type, unroll=unroll, w_iterator=w_iterator, - unpack_target=unpack_target) + unpack_target=unpack_target, + unpackcls=unpackcls) unpack_into_driver.jit_merge_point(w_type=w_type, unroll=unroll, w_iterator=w_iterator, - unpack_target=unpack_target) + unpack_target=unpack_target, + unpackcls=unpackcls) try: w_item = space.next(w_iterator) except OperationError, e: diff --git a/pypy/module/pypyjit/test/test_unpacktracing.py b/pypy/module/pypyjit/test/test_unpacktracing.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test/test_unpacktracing.py @@ -0,0 +1,107 @@ +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib import jit + +from pypy.interpreter import unpack, error + +class W_Root(object): + type = None + + from pypy.interpreter.unpack import generic_unpack_into as unpack_into + + def next(self): + raise NotImplementedError + def iter(self): + raise NotImplementedError + +class W_Type(W_Root): + pass +W_Type.type = W_Type() + +class W_List(W_Root): + type = W_Type() + def __init__(self, l): + self.l = l + + def iter(self): + return W_Iter(self.l) + +class W_Iter(W_Root): + type = W_Type() + def __init__(self, l): + self.l = l + self.i = 0 + + def next(self): + i = self.i + if i >= len(self.l): + raise error.OperationError(StopIteration, None) + self.i += 1 + return self.l[i] + +class W_Int(W_Root): + type = W_Type() + + def __init__(self, value): + self.value = value + +class W_String(W_Root): + type = W_Type() + + def __init__(self, value): + self.value = value + +class FakeSpace(object): + w_StopIteration = StopIteration + w_ValueError = ValueError + + def iter(self, w_obj): + return w_obj.iter() + + def next(self, w_obj): + return w_obj.next() + + def type(self, w_obj): + return w_obj.type + + def length_hint(self, w_obj, x): + return 7 + + def exception_match(self, w_obj, w_cls): + return w_obj is w_cls + + def wrap(self, string): + return W_String(string) + + def _freeze_(self): + return True + +space = FakeSpace() + + +class TestUnpackJIT(LLJitMixin): + def test_jit_unpack(self): + def f(i): + l = [W_Int(x) for x in range(100 + i)] + l.append(W_Int(i)) + + w_l = W_List(l) + res = 0 + target = unpack.FixedSizeUnpackTarget(space, len(l)) + if i < 0: + w_l.unpack_into(space, target, unroll=True) + else: + w_l.unpack_into(space, target) + res += len(target.items_w) + target = unpack.InterpListUnpackTarget(space, w_l) + w_l.unpack_into(space, target) + return len(target.items_w) + res + assert f(4) == 210 + + # 
hack + from rpython.jit.metainterp import compile + class A(object): + view = viewloops = False + compile.option = A() + result = self.meta_interp(f, [4], listops=True, backendopt=True, listcomp=True) + assert result == 210 + self.check_trace_count(2) From noreply at buildbot.pypy.org Mon Jul 15 11:49:43 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 15 Jul 2013 11:49:43 +0200 (CEST) Subject: [pypy-commit] pypy faster-set-of-iterator: do unrolling the old-fashioned way. Write a test that it works. Message-ID: <20130715094943.02A8A1C039A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: faster-set-of-iterator Changeset: r65399:ad1ca28aad79 Date: 2013-07-11 22:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ad1ca28aad79/ Log: do unrolling the old-fashioned way. Write a test that it works. diff --git a/pypy/interpreter/unpack.py b/pypy/interpreter/unpack.py --- a/pypy/interpreter/unpack.py +++ b/pypy/interpreter/unpack.py @@ -40,20 +40,21 @@ unpack_into_driver = jit.JitDriver(name='unpack_into', - greens=['unroll', 'unpackcls', 'w_type'], + greens=['unpackcls', 'w_type'], reds=['unpack_target', 'w_iterator']) def generic_unpack_into(w_iterable, space, unpack_target, unroll=False): + if unroll: + return generic_unpack_into_unroll(w_iterable, space, unpack_target) + else: + return generic_unpack_into_jitdriver(w_iterable, space, unpack_target) + +def generic_unpack_into_jitdriver(w_iterable, space, unpack_target): w_iterator = space.iter(w_iterable) w_type = space.type(w_iterator) unpackcls = type(unpack_target) while True: - if not unroll: - unpack_into_driver.can_enter_jit(w_type=w_type, unroll=unroll, - w_iterator=w_iterator, - unpack_target=unpack_target, - unpackcls=unpackcls) - unpack_into_driver.jit_merge_point(w_type=w_type, unroll=unroll, + unpack_into_driver.jit_merge_point(w_type=w_type, w_iterator=w_iterator, unpack_target=unpack_target, unpackcls=unpackcls) @@ -65,3 +66,15 @@ break # done unpack_target.append(w_item) + at jit.unroll_safe +def generic_unpack_into_unroll(w_iterable, space, unpack_target): + w_iterator = space.iter(w_iterable) + while True: + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + unpack_target.append(w_item) + diff --git a/pypy/module/pypyjit/test/test_unpacktracing.py b/pypy/module/pypyjit/test/test_unpacktracing.py --- a/pypy/module/pypyjit/test/test_unpacktracing.py +++ b/pypy/module/pypyjit/test/test_unpacktracing.py @@ -97,11 +97,30 @@ return len(target.items_w) + res assert f(4) == 210 - # hack - from rpython.jit.metainterp import compile - class A(object): - view = viewloops = False - compile.option = A() result = self.meta_interp(f, [4], listops=True, backendopt=True, listcomp=True) assert result == 210 self.check_trace_count(2) + + def test_unroll(self): + unpack_into_driver = jit.JitDriver(greens=[], reds='auto') + def f(i): + l = [W_Int(x) for x in range(i)] + l.append(W_Int(i)) + + w_l = W_List(l) + res = 0 + for i in range(100): + unpack_into_driver.jit_merge_point() + target = unpack.FixedSizeUnpackTarget(space, len(l)) + if i < 0: + w_l.unpack_into(space, target) + else: + w_l.unpack_into(space, target, unroll=True) + res += len(target.items_w) + return res + assert f(4) == 500 + + result = self.meta_interp(f, [4], listops=True, backendopt=True, listcomp=True) + assert result == 500 + self.check_resops(getarrayitem_gc=10, setarrayitem_gc=10, + call_may_force=0) From noreply at buildbot.pypy.org Mon Jul 15 12:18:04 2013 
From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 15 Jul 2013 12:18:04 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: forgot OP_STM_GET_ROOT_STACK_TOP in funcgen Message-ID: <20130715101804.1E86E1C0205@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65400:7a372f0f9000 Date: 2013-07-15 08:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7a372f0f9000/ Log: forgot OP_STM_GET_ROOT_STACK_TOP in funcgen diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -591,6 +591,7 @@ OP_STM_PTR_EQ = _OP_STM OP_STM_PUSH_ROOT = _OP_STM OP_STM_POP_ROOT_INTO = _OP_STM + OP_STM_GET_ROOT_STACK_TOP = _OP_STM OP_STM_ALLOCATE = _OP_STM OP_STM_GET_TID = _OP_STM OP_STM_HASH = _OP_STM From noreply at buildbot.pypy.org Mon Jul 15 12:18:05 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 15 Jul 2013 12:18:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: satisfy some tests Message-ID: <20130715101805.60CE01C0205@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65401:d11b7c7058b0 Date: 2013-07-15 12:17 +0200 http://bitbucket.org/pypy/pypy/changeset/d11b7c7058b0/ Log: satisfy some tests diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -75,11 +75,18 @@ from rpython.translator.c import genc # t = TranslationContext() - t.config.translation.gc = gc + gcrootfinder = kwds['gcrootfinder'] + if gcrootfinder == 'stm': + t.config.translation.stm = True + t.config.translation.gc = 'stmgc' + else: + t.config.translation.gc = gc + # if gc != 'boehm': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): setattr(t.config.translation, name, value) + ann = t.buildannotator() ann.build_types(f, [s_list_of_strings], main_entry_point=True) t.buildrtyper().specialize() diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2188,7 +2188,7 @@ operations = [ ResOperation(rop.COND_CALL_GC_WB, [p0, ConstInt(0)], None, descr=WriteBarrierDescr()), - ResOperation(rop.FINISH, [p0], None, descr=BasicFailDescr(1)) + ResOperation(rop.FINISH, [p0], None, descr=BasicFinalDescr(0)) ] inputargs = [p0] looptoken = JitCellToken() @@ -4246,10 +4246,9 @@ class WBDescrForTests(AbstractDescr): returns_modified_object = False b_slowpath = (0, 0, 0, 0) - def get_b_slowpath(self, c1, c2): - return self.b_slowpath[c1+2*c2] - def set_b_slowpath(self, c1, c2, addr): - i = c1+2*c2 - self.b_slowpath = (self.b_slowpath[:i] + (addr,) + - self.b_slowpath[i+1:]) + def get_b_slowpath(self, c1): + return self.b_slowpath[c1] + def set_b_slowpath(self, c1, addr): + self.b_slowpath = (self.b_slowpath[:c1] + (addr,) + + self.b_slowpath[c1+1:]) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1032,19 +1032,22 @@ cb.emit_no_collect() def _reload_frame_if_necessary(self, mc, align_stack=False): - gcrootmap = self.cpu.gc_ll_descr.gcrootmap + gc_ll_descr = self.cpu.gc_ll_descr + gcrootmap = gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: rst = gcrootmap.get_root_stack_top_addr() mc.MOV(ecx, heap(rst)) mc.MOV(ebp, mem(ecx, 
-WORD)) if gcrootmap and gcrootmap.is_stm: - wbdescr = self.cpu.gc_ll_descr.P2Wdescr + if not hasattr(gc_ll_descr, 'P2Wdescr'): + raise Exception("unreachable code") + wbdescr = gc_ll_descr.P2Wdescr self._stm_barrier_fastpath(mc, wbdescr, [ebp], is_frame=True, align_stack=align_stack) return - wbdescr = self.cpu.gc_ll_descr.write_barrier_descr + wbdescr = gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not # an array diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -825,7 +825,7 @@ def consider_call_malloc_nursery_varsize_frame(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - assert gc_ll_descr.max_size_of_young_obj is not None + assert gc_ll_descr.get_malloc_slowpath_addr() is not None # ^^^ if this returns None, don't translate the rest of this function # size_box = op.getarg(0) @@ -850,10 +850,8 @@ def consider_call_malloc_nursery_varsize(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr - assert gc_ll_descr.max_size_of_young_obj is not None - # ^^^ if this returns None, don't translate the rest of this function - # - if not hasattr(gc_ll_descr, 'max_size_of_young_obj'): + if not hasattr(gc_ll_descr, 'max_size_of_young_obj') \ + or gc_ll_descr.max_size_of_young_obj is None: raise Exception("unreachable code") # for boehm, this function should never be called arraydescr = op.getdescr() diff --git a/rpython/jit/backend/x86/test/test_zrpy_gc.py b/rpython/jit/backend/x86/test/test_zrpy_gc.py --- a/rpython/jit/backend/x86/test/test_zrpy_gc.py +++ b/rpython/jit/backend/x86/test/test_zrpy_gc.py @@ -1,5 +1,9 @@ from rpython.jit.backend.llsupport.test.zrpy_gc_test import CompileFrameworkTests +class TestSTMShadowStack(CompileFrameworkTests): + gcrootfinder = "stm" + + class TestShadowStack(CompileFrameworkTests): gcrootfinder = "shadowstack" diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,5 +1,6 @@ from rpython.translator.simplify import get_graph, get_funcobj from rpython.tool.algo.unionfind import UnionFind +from rpython.rtyper.lltypesystem import lltype class GraphAnalyzer(object): From noreply at buildbot.pypy.org Mon Jul 15 16:17:54 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 15 Jul 2013 16:17:54 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix and add more tests Message-ID: <20130715141754.1E13D1C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65402:586771804a5c Date: 2013-07-15 16:16 +0200 http://bitbucket.org/pypy/pypy/changeset/586771804a5c/ Log: fix and add more tests diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -315,6 +315,9 @@ exc0, exc1 = None, None if descr is None: return + + if is_stm and withcards: + return if not withcards: func = descr.get_barrier_fn(self.cpu, diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -23,7 +23,7 @@ class transformerclass(ShadowStackFrameworkGCTransformer): root_stack_depth = 100 -def test_framework_simple(): +def 
test_framework_simple(gc="minimark"): def g(x): return x + 1 class A(object): @@ -37,7 +37,10 @@ from rpython.translator.c.genc import CStandaloneBuilder t = rtype(entrypoint, [s_list_of_strings]) - t.config.translation.gc = "minimark" + if gc == "stmgc": + t.config.translation.stm = True + t.config.translation.gc = gc + cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) db = cbuild.generate_graphs_for_llinterp() @@ -54,6 +57,9 @@ assert res == 2 +def test_framework_simple_stm(): + test_framework_simple("stmgc") + def test_cancollect(): S = lltype.GcStruct('S', ('x', lltype.Signed)) def g(): @@ -94,7 +100,7 @@ gg = graphof(t, g) assert CollectAnalyzer(t).analyze_direct_call(gg) -def test_no_collect(): +def test_no_collect(gc="minimark"): from rpython.rlib import rgc from rpython.translator.c.genc import CStandaloneBuilder @@ -109,12 +115,17 @@ return g() + 2 t = rtype(entrypoint, [s_list_of_strings]) - t.config.translation.gc = "minimark" + if gc == "stmgc": + t.config.translation.stm = True + t.config.translation.gc = gc cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) db = cbuild.generate_graphs_for_llinterp() -def test_no_collect_detection(): +def test_no_collect_stm(): + test_no_collect("stmgc") + +def test_no_collect_detection(gc="minimark"): from rpython.rlib import rgc from rpython.translator.c.genc import CStandaloneBuilder @@ -133,13 +144,18 @@ return g() + 2 t = rtype(entrypoint, [s_list_of_strings]) - t.config.translation.gc = "minimark" + if gc == "stmgc": + t.config.translation.stm = True + t.config.translation.gc = gc cbuild = CStandaloneBuilder(t, entrypoint, t.config, gcpolicy=FrameworkGcPolicy2) f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp) expected = "'no_collect' function can trigger collection: Author: Armin Rigo Branch: weakref Changeset: r395:c05f9e0b6db3 Date: 2013-07-15 12:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/c05f9e0b6db3/ Log: A branch to implement weakref support (as small immutable objects only, like needed by PyPy) From noreply at buildbot.pypy.org Mon Jul 15 16:29:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 15 Jul 2013 16:29:17 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: Starting Message-ID: <20130715142917.7CC631C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r396:e0e0b65b0f26 Date: 2013-07-15 16:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/e0e0b65b0f26/ Log: Starting diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -54,6 +54,7 @@ gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); + gcptrlist_delete(&d->young_weakrefs); } void stmgc_minor_collect_soon(void) @@ -100,6 +101,15 @@ return P; } +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) +{ + gcptr weakref = stm_allocate(size, tid); + assert(stmgc_size(weakref) == size); + WEAKREF_PTR(weakref, size) = obj; + gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref); + return weakref; +} + gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); @@ -429,6 +439,19 @@ fxcache_clear(&d->recent_reads_cache); } +static void move_young_weakrefs(struct tx_descriptor *d) +{ + while (gcptrlist_size(&d->young_weakrefs) > 0) { + gcptr weakref = gcptrlist_pop(&d->young_weakrefs); + if (!(weakref->h_tid & GCFLAG_NURSERY_MOVED)) + continue; /* the weakref itself dies */ + + weakref = (gcptr)weakref->h_revision; + size_t size = stmgc_size(weakref); + 
WEAKREF_PTR(weakref, size) = NULL; /* XXX */ + } +} + static void setup_minor_collect(struct tx_descriptor *d) { spinlock_acquire(d->public_descriptor->collection_lock, 'M'); /*minor*/ @@ -440,6 +463,7 @@ { //assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); spinlock_release(d->public_descriptor->collection_lock); @@ -475,6 +499,8 @@ surviving young-but-outside-the-nursery objects have been flagged with GCFLAG_OLD */ + move_young_weakrefs(d); + teardown_minor_collect(d); assert(!stm_has_got_any_lock(d)); @@ -541,6 +567,7 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->list_of_read_objects) >= d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -50,7 +50,10 @@ still in the same transaction, to know that the initial \ part of the lists cannot contain young objects any more. */ \ long num_private_from_protected_known_old; \ - long num_read_objects_known_old; + long num_read_objects_known_old; \ + \ + /* Weakref support */ \ + struct GcPtrList young_weakrefs; struct tx_descriptor; /* from et.h */ @@ -65,4 +68,7 @@ void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); +#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD)) + + #endif diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -39,10 +39,8 @@ _Bool stm_pointer_equal(gcptr, gcptr); /* to push/pop objects into the local shadowstack */ -#if 0 // (optimized version below) -void stm_push_root(gcptr); -gcptr stm_pop_root(void); -#endif +static inline void stm_push_root(gcptr); +static inline gcptr stm_pop_root(void); /* initialize/deinitialize the stm framework in the current thread */ void stm_initialize(void); @@ -55,16 +53,14 @@ void stm_leave_callback_call(int); /* read/write barriers (the most general versions only for now) */ -#if 0 // (optimized version below) -gcptr stm_read_barrier(gcptr); -gcptr stm_write_barrier(gcptr); -#endif +static inline gcptr stm_read_barrier(gcptr); +static inline gcptr stm_write_barrier(gcptr); /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' provided, and with a retry_counter number. Must save roots around - this call. The callback() is called repeatedly as long as it - returns a value > 0. */ + this call. If callback() returns a value > 0, it is called + again. */ void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); /* finish the current transaction, start a new one, or turn the current @@ -114,6 +110,12 @@ void stm_minor_collect(void); void stm_major_collect(void); +/* weakref support: allocate a weakref object, and set it to point + weakly to 'obj'. The weak pointer offset is hard-coded to be at + 'size - WORD'. Important: stmcb_trace() must NOT trace it. 
*/ +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); + + /**************** END OF PUBLIC INTERFACE *****************/ /************************************************************/ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -46,7 +46,7 @@ #define PREBUILT_FLAGS ... #define PREBUILT_REVISION ... - gcptr stm_allocate(size_t size, unsigned int tid); + gcptr stm_allocate(size_t size, unsigned long tid); revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); @@ -69,6 +69,7 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); void stm_abort_and_retry(void); + gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); /* extra non-public code */ void printfcolor(char *msg); @@ -164,14 +165,18 @@ gcptr rawgetptr(gcptr obj, long index) { - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); return ((gcptr *)(obj + 1))[index]; } void rawsetptr(gcptr obj, long index, gcptr newvalue) { fprintf(stderr, "%p->[%ld] = %p\n", obj, index, newvalue); - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); ((gcptr *)(obj + 1))[index] = newvalue; } @@ -282,6 +287,8 @@ else { int nrefs = gettid(obj) - 42142; assert(nrefs < 100); + if (nrefs == 0) /* weakrefs */ + nrefs = 1; return sizeof(*obj) + nrefs * sizeof(gcptr); } } @@ -484,7 +491,7 @@ def oalloc_refs(nrefs): """Allocate an 'old' protected object, outside any nursery, with nrefs pointers""" - size = HDR + WORD * nrefs + size = HDR + WORD * (nrefs or 1) p = lib.stmgcpage_malloc(size) lib.memset(p, 0, size) p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER @@ -506,9 +513,9 @@ def nalloc_refs(nrefs): "Allocate a fresh object from the nursery, with nrefs pointers" - p = lib.stm_allocate(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.stm_allocate(HDR + WORD * (nrefs or 1), 42142 + nrefs) assert p.h_revision == lib.get_private_rev_num() - for i in range(nrefs): + for i in range(nrefs or 1): assert rawgetptr(p, i) == ffi.NULL # must already be zero-filled return p @@ -524,9 +531,9 @@ def palloc_refs(nrefs, prehash=None): "Get a ``prebuilt'' object with nrefs pointers." 
if prehash is None: - p = lib.pseudoprebuilt(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.pseudoprebuilt(HDR + WORD * (nrefs or 1), 42142 + nrefs) else: - p = lib.pseudoprebuilt_with_hash(HDR + WORD * nrefs, + p = lib.pseudoprebuilt_with_hash(HDR + WORD * (nrefs or 1), 42142 + nrefs, prehash) return p @@ -684,5 +691,8 @@ should_break_transaction = lib.stm_should_break_transaction - +WEAKREF_SIZE = HDR + WORD +WEAKREF_TID = 42142 + + nrb_protected = ffi.cast("gcptr", -1) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -319,3 +319,14 @@ def test_collect_soon(): lib.stmgc_minor_collect_soon() nalloc(HDR) + +def test_weakref_invalidate(): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + minor_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == ffi.NULL From noreply at buildbot.pypy.org Mon Jul 15 16:32:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 15 Jul 2013 16:32:06 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: A weakref-kept-alive test Message-ID: <20130715143206.0472E1C1380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r397:e2f68af5cc4e Date: 2013-07-15 16:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/e2f68af5cc4e/ Log: A weakref-kept-alive test diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -448,7 +448,13 @@ weakref = (gcptr)weakref->h_revision; size_t size = stmgc_size(weakref); - WEAKREF_PTR(weakref, size) = NULL; /* XXX */ + gcptr obj = WEAKREF_PTR(weakref, size); + + if (obj->h_tid & GCFLAG_NURSERY_MOVED) + obj = obj->h_revision; + else + obj = NULL; + WEAKREF_PTR(weakref, size) = obj; } } diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -330,3 +330,21 @@ minor_collect() p1 = lib.stm_pop_root() assert lib.rawgetptr(p1, 0) == ffi.NULL + +def test_weakref_itself_dies(): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + minor_collect() + +def test_weakref_keep(): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 From noreply at buildbot.pypy.org Mon Jul 15 16:37:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 15 Jul 2013 16:37:20 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: Weakrefs to old objects Message-ID: <20130715143720.F0F571C1380@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r398:afdffd3e0b36 Date: 2013-07-15 16:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/afdffd3e0b36/ Log: Weakrefs to old objects diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -449,6 +449,8 @@ weakref = (gcptr)weakref->h_revision; size_t size = stmgc_size(weakref); gcptr obj = WEAKREF_PTR(weakref, size); + if (!is_in_nursery(d, obj)) + continue; /* the pointer does not change */ if (obj->h_tid & GCFLAG_NURSERY_MOVED) obj = obj->h_revision; diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- 
a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -348,3 +348,16 @@ p2 = lib.stm_pop_root() p1 = lib.stm_pop_root() assert lib.rawgetptr(p1, 0) == p2 + +def test_weakref_old_keep(): + p2 = oalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 From noreply at buildbot.pypy.org Mon Jul 15 17:17:26 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 15 Jul 2013 17:17:26 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix one bug and use a "big stub" in case it is also needed as the h_original of an object Message-ID: <20130715151726.0CB001C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r399:6d83a39b28cf Date: 2013-07-15 17:16 +0200 http://bitbucket.org/pypy/stmgc/changeset/6d83a39b28cf/ Log: fix one bug and use a "big stub" in case it is also needed as the h_original of an object diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -75,7 +75,7 @@ // helper functions int classify(gcptr p); void check(gcptr p); - +int in_nursery(gcptr obj); static int is_private(gcptr P) { return (P->h_revision == stm_private_rev_num) || @@ -226,8 +226,7 @@ if (p->h_original && !(p->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { // must point to valid old object gcptr id = (gcptr)p->h_original; - assert(id->h_tid & GCFLAG_OLD); - check_not_free(id); + assert(!in_nursery(id)); #ifdef _GC_DEBUG if (!is_shared_prebuilt(id) && !(id->h_tid & GCFLAG_PREBUILT)) assert(!is_free_old(id)); diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -107,10 +107,12 @@ else { /* must create shadow original object XXX: or use backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); + gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); + memcpy(O, p, stmgc_size(p)); /* at least major collections + depend on some content of id_copy. + remove after fixing that XXX */ + O->h_tid |= GCFLAG_OLD; + p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -39,7 +39,21 @@ goto done; not_found: - stub = stm_stub_malloc(sd->foreign_pd); + if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) { + /* There shouldn't be a public, young object without + a h_original. But there can be priv/protected ones. + We have a young protected copy without an h_original + The stub we allocate will be the h_original, but + it must be big enough to be copied over by a major + collection later. */ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + + stub = (gcptr)stmgcpage_malloc(stmgc_size(obj)); + STUB_THREAD(stub) = sd->foreign_pd; + } + else { + stub = stm_stub_malloc(sd->foreign_pd); + } stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; @@ -51,10 +65,9 @@ stub->h_original = (revision_t)obj; } else { - /* There shouldn't be a public, young object without - a h_original. But there can be protected ones. */ - assert(!(obj->h_tid & GCFLAG_PUBLIC)); - obj->h_original = (revision_t)stub; + /* this is the big-stub case described above */ + obj->h_original = (revision_t)stub; + stub->h_original = 0; /* stub_malloc does not set to 0... 
*/ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { ((gcptr)obj->h_revision)->h_original = (revision_t)stub; } From noreply at buildbot.pypy.org Mon Jul 15 18:00:36 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 15 Jul 2013 18:00:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed a bug with type conversion when interpreting Message-ID: <20130715160036.113401C0205@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r495:50042e3907a0 Date: 2013-07-15 14:20 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/50042e3907a0/ Log: fixed a bug with type conversion when interpreting diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -284,7 +284,7 @@ if shift == 0: return self # a problem might arrise, because we may shift in ones from left - mask = (1 << (32 - shift))- 1 + mask = intmask((1 << (32 - shift))- 1) # the mask is only valid if the highest bit of self.value is set # and only in this case we do need such a mask return space.wrap_int((self.value >> shift) & mask) From noreply at buildbot.pypy.org Mon Jul 15 18:00:38 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 15 Jul 2013 18:00:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: improvements to the benchmarking code in the image, an unknown fix to bitblt by tim Message-ID: <20130715160038.1EC371C0205@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r496:011594b37fd4 Date: 2013-07-15 14:26 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/011594b37fd4/ Log: improvements to the benchmarking code in the image, an unknown fix to bitblt by tim diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index 9171a81055a32b5c93cf278576525da9af88c942..fa4c1e60de2e4482f2b3308297fef4b73835ef1e GIT binary patch [cut] From noreply at buildbot.pypy.org Mon Jul 15 18:18:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 15 Jul 2013 18:18:59 +0200 (CEST) Subject: [pypy-commit] stmgc default: Potential bug: stmgc_size() can return a number smaller than needed Message-ID: <20130715161859.3C49F1C101D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r400:d609310cc4d0 Date: 2013-07-15 18:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/d609310cc4d0/ Log: Potential bug: stmgc_size() can return a number smaller than needed for a stub. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1091,7 +1091,7 @@ #endif L->h_revision = new_revision; - gcptr stub = stm_stub_malloc(d->public_descriptor); + gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -1,11 +1,13 @@ #include "stmimpl.h" -gcptr stm_stub_malloc(struct tx_public_descriptor *pd) +gcptr stm_stub_malloc(struct tx_public_descriptor *pd, size_t minsize) { assert(pd->collection_lock != 0); + if (minsize < sizeof(struct stm_stub_s)) + minsize = sizeof(struct stm_stub_s); - gcptr p = stmgcpage_malloc(sizeof(struct stm_stub_s)); + gcptr p = stmgcpage_malloc(minsize); STUB_THREAD(p) = pd; return p; } @@ -38,7 +40,8 @@ assert(stub->h_revision == (((revision_t)obj) | 2)); goto done; - not_found: + not_found:; + size_t size = 0; if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) { /* There shouldn't be a public, young object without a h_original. But there can be priv/protected ones. @@ -48,12 +51,9 @@ collection later. 
*/ assert(!(obj->h_tid & GCFLAG_PUBLIC)); - stub = (gcptr)stmgcpage_malloc(stmgc_size(obj)); - STUB_THREAD(stub) = sd->foreign_pd; + size = stmgc_size(obj); } - else { - stub = stm_stub_malloc(sd->foreign_pd); - } + stub = stm_stub_malloc(sd->foreign_pd, size); stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; diff --git a/c4/steal.h b/c4/steal.h --- a/c4/steal.h +++ b/c4/steal.h @@ -9,7 +9,7 @@ #define STUB_THREAD(h) (((struct stm_stub_s *)(h))->s_thread) -gcptr stm_stub_malloc(struct tx_public_descriptor *); +gcptr stm_stub_malloc(struct tx_public_descriptor *, size_t minsize); void stm_steal_stub(gcptr); gcptr stm_get_stolen_obj(long index); /* debugging */ void stm_normalize_stolen_objects(struct tx_descriptor *); From noreply at buildbot.pypy.org Mon Jul 15 18:44:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 15 Jul 2013 18:44:40 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: merge default into branch Message-ID: <20130715164440.0915B1C0205@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65403:f050a9218874 Date: 2013-07-14 20:15 +0300 http://bitbucket.org/pypy/pypy/changeset/f050a9218874/ Log: merge default into branch diff too long, truncating to 2000 out of 8063 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. 
_`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). 
+ +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,72 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture + +* Stacklet support on ARM + +* Interpreter improvements + +* Various numpy improvements + +* Bugfixes to cffi and ctypes + +* Bugfixes to the stacklet support + +* Improved logging performance + +* Faster sets for objects + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. 
_`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,25 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer -.. branch: remove-array-smm -Remove multimethods in the arraymodule +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. -.. branch: remove-set-smm -Remove multi-methods on sets - -.. branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" 
- assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" + """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) 
assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. 
+ builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. " + "You might need to install libncurses5-dev or similar.") eci = guess_eci() diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. + + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. 
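+    Because of the latin-1 assumption the result is the same as
+    u''.join([unichr(ord(ch)) for ch in s[start:end]]), only without
+    building the intermediate slice first.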
+ """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for 
the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) + diff = rffi.cast(rffi.LONG, self.end_ptr[0]) - rffi.cast(rffi.LONG, start) + self.pos = i + diff + return self.space.wrap(floatval) + + def decode_int_slow(self, i): + start = i + if self.ll_chars[i] == '-': + i += 1 + while self.ll_chars[i].isdigit(): + i += 1 + s = self.getslice(start, i) + self.pos = i + return self.space.call_function(self.space.w_int, self.space.wrap(s)) + + def parse_integer(self, i): + "Parse a decimal number with an optional minus sign" + sign = 1 + # parse the sign + if self.ll_chars[i] == '-': + sign = -1 + i += 1 + elif self.ll_chars[i] == '+': + i += 1 + # + if self.ll_chars[i] == '0': + i += 1 + return i, False, 0 + + intval = 0 + start = i + while True: + ch = self.ll_chars[i] + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + i += 1 + else: + break + count = i - start + if count == 0: + self._raise("Expected digit at char %d", i) + # if the number has more digits than OVF_DIGITS, it might have + # overflowed + ovf_maybe = (count >= OVF_DIGITS) + return i, ovf_maybe, sign * intval + parse_integer._always_inline_ = True + + def decode_array(self, i): + w_list = self.space.newlist([]) + start = i + count = 0 + i = self.skip_whitespace(start) + if self.ll_chars[i] == ']': + self.pos = i+1 + return w_list + # + while True: + w_item = self.decode_any(i) + i = self.pos + self.space.call_method(w_list, 'append', w_item) + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + i += 1 + if ch == ']': + self.pos = i + return w_list + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.pos) + + def decode_object(self, i): + start = i + w_dict = self.space.newdict() + # + i = self.skip_whitespace(i) + if self.ll_chars[i] == '}': + self.pos = i+1 + return w_dict + # + while True: + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any(i) + if self.last_type != TYPE_STRING: + self._raise("Key name must be string for object starting at char %d", start) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + if ch != ':': + self._raise("No ':' found at char %d", i) + i += 1 + i = self.skip_whitespace(i) + # + w_value = self.decode_any(i) + self.space.setitem(w_dict, w_name, w_value) + i = self.skip_whitespace(self.pos) + ch = self.ll_chars[i] + i += 1 + if ch == '}': + self.pos = i + return w_dict + elif ch == ',': + pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.pos) + + + def decode_string(self, i): + start = i + bits = 0 + while True: + # this loop is a fast path for strings which do not contain escape + # characters + ch = self.ll_chars[i] + i += 1 + bits |= ord(ch) + if ch == '"': + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + else: + # 
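+                    # bits is the OR of the ordinals of every character seen
+                    # so far, so a set 0x80 bit means at least one byte was
+                    # non-ASCII and the content has to go through the utf-8
+                    # decoder above; otherwise this is the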
ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + content_so_far = self.getslice(start, i-1) + self.pos = i-1 + return self.decode_string_escaped(start, content_so_far) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + + + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + self.pos = i + return self.space.wrap(content_unicode) + elif ch == '\\': + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) + else: + builder.append_multiple_char(ch, 1) # we should implement append_char + + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 + put = builder.append_multiple_char + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) + elif ch == 'u': + return self.decode_escape_sequence_unicode(i, builder) + else: + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i + + def decode_escape_sequence_unicode(self, i, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
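+        # The four hex digits are parsed with int(hexdigits, 16).  A value
+        # with val & 0xfc00 == 0xd800 is a high surrogate (0xD800-0xDBFF), so
+        # the next escape must be the matching low surrogate and both halves
+        # are combined by decode_surrogate_pair(); for example the pair
+        # \ud83d \ude00 gives 0x10000 + ((0x3d << 10) | 0x200) == 0x1f600.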
+ start = i + i += 4 + hexdigits = self.getslice(start, i) + try: + val = int(hexdigits, 16) + if val & 0xfc00 == 0xd800: + # surrogate pair + val = self.decode_surrogate_pair(i, val) + i += 6 + except ValueError: + self._raise("Invalid \uXXXX escape (char %d)", i-1) + return # help the annotator to know that we'll never go beyond + # this point + # + uchr = unichr(val) + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + return i + + def decode_surrogate_pair(self, i, highsurr): + if self.ll_chars[i] != '\\' or self.ll_chars[i+1] != 'u': + self._raise("Unpaired high surrogate at char %d", i) + i += 2 + hexdigits = self.getslice(i, i+4) + lowsurr = int(hexdigits, 16) # the possible ValueError is caugth by the caller + return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) + +def loads(space, w_s): + if space.isinstance_w(w_s, space.w_unicode): + raise OperationError(space.w_TypeError, + space.wrap("Expected utf8-encoded str, got unicode")) + s = space.str_w(w_s) + decoder = JSONDecoder(space, s) + try: + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if i < len(s): + start = i + end = len(s) - 1 + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res + finally: + decoder.close() diff --git a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/targetjson.py @@ -0,0 +1,143 @@ +import sys +import py +ROOT = py.path.local(__file__).dirpath('..', '..', '..') +sys.path.insert(0, str(ROOT)) + +import time +from rpython.rlib.streamio import open_file_as_stream +from pypy.interpreter.error import OperationError +from pypy.module._pypyjson.interp_decoder import loads + + + +## MSG = open('msg.json').read() + +class W_Root(object): + pass + +class W_Dict(W_Root): + def __init__(self): + self.dictval = {} + +class W_Unicode(W_Root): + def __init__(self, x): + self.unival = x + +class W_String(W_Root): + def __init__(self, x): + self.strval = x + +class W_Int(W_Root): + def __init__(self, x): + self.intval = x + +class W_Float(W_Root): + def __init__(self, x): + self.floatval = x + +class W_List(W_Root): + def __init__(self): + self.listval = [] + +class W_Singleton(W_Root): + def __init__(self, name): + self.name = name + +class FakeSpace(object): + + w_None = W_Singleton('None') + w_True = W_Singleton('True') + w_False = W_Singleton('False') + w_ValueError = W_Singleton('ValueError') + w_UnicodeDecodeError = W_Singleton('UnicodeDecodeError') + w_unicode = W_Unicode + w_int = W_Int + w_float = W_Float + + def newtuple(self, items): + return None + + def newdict(self): + return W_Dict() + + def newlist(self, items): + return W_List() + + def isinstance_w(self, w_x, w_type): + return isinstance(w_x, w_type) + + def str_w(self, w_x): + assert isinstance(w_x, W_String) + return w_x.strval + + def call_method(self, obj, name, arg): + assert name == 'append' + assert isinstance(obj, W_List) + obj.listval.append(arg) + call_method._dont_inline_ = True + + def call_function(self, w_func, *args_w): + return self.w_None # XXX + + def setitem(self, d, key, value): + assert isinstance(d, W_Dict) + assert isinstance(key, W_Unicode) + d.dictval[key.unival] = value + + def wrapunicode(self, x): From noreply at buildbot.pypy.org Mon Jul 15 18:44:41 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 15 Jul 2013 18:44:41 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: write more subtype tests Message-ID: 
<20130715164441.4AB6C1C0205@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65404:ebffae7b6139 Date: 2013-07-15 00:36 +0300 http://bitbucket.org/pypy/pypy/changeset/ebffae7b6139/ Log: write more subtype tests diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -122,10 +122,27 @@ assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) def test_sub_getitem_filter(self): - assert False + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + c = b[array([False, True, False, True, False])] + assert c.shape == (2,) + assert (c == [1, 3]).all() + assert isinstance(c, self.SubType) + assert b.called_new + assert not getattr(c, 'called_new', False) + assert c.called_finalize def test_sub_getitem_array_int(self): - assert False + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + assert b.called_new + c = b[array([3, 2, 1, 4])] + assert (c == [3, 2, 1, 4]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize def test_sub_round(self): from numpypy import array @@ -140,12 +157,38 @@ def test_sub_dot(self): # the returned type is that of the first argument - assert False + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = array(range(12)).reshape(4,3).view(self.SubType) + d = c.dot(a) + assert isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert d.called_finalize + d = a.dot(c) + assert not isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert not getattr(d, 'called_finalize', False) def test_sub_reduce(self): # i.e. sum, max # test for out as well - assert False + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = b.sum(axis=0) + assert (c == [12, 15, 18, 21]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + d = array(range(4)) + c = b.sum(axis=0, out=d) + assert c is d + assert not isinstance(c, self.SubType) + d = array(range(4)).view(self.NoNew) + c = b.sum(axis=0, out=d) + assert c is d + assert isinstance(c, self.NoNew) def test_sub_call2(self): # c + a vs. a + c, what about array priority? From noreply at buildbot.pypy.org Mon Jul 15 18:44:42 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 15 Jul 2013 18:44:42 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: more hacks in compile.py Message-ID: <20130715164442.6ADBA1C0205@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65405:e6eee323ec9a Date: 2013-07-15 19:38 +0300 http://bitbucket.org/pypy/pypy/changeset/e6eee323ec9a/ Log: more hacks in compile.py diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -212,6 +212,10 @@ def call_function(self, tp, w_dtype): return w_dtype + def call_method(self, w_obj, s, *args, **kwargs): + # XXX hack + return getattr(w_obj, 'descr_' + s)(self, *args, **kwargs) + @specialize.arg(1) def interp_w(self, tp, what): assert isinstance(what, tp) From noreply at buildbot.pypy.org Mon Jul 15 19:42:36 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 15 Jul 2013 19:42:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Added a missing CLI flag. 
Message-ID: <20130715174236.87E981C0205@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65406:fb06dfd56f2d Date: 2013-07-15 10:41 -0700 http://bitbucket.org/pypy/pypy/changeset/fb06dfd56f2d/ Log: Added a missing CLI flag. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } From noreply at buildbot.pypy.org Mon Jul 15 20:27:57 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 15 Jul 2013 20:27:57 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed a typo with _attr_ -> _attrs_ and subsequent annotator errors Message-ID: <20130715182757.804A11C101D@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r497:0a1371c9a90f Date: 2013-07-15 20:26 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0a1371c9a90f/ Log: fixed a typo with _attr_ -> _attrs_ and subsequent annotator errors diff --git a/spyvm/error.py b/spyvm/error.py --- a/spyvm/error.py +++ b/spyvm/error.py @@ -27,6 +27,7 @@ pass class Exit(Exception): + _attrs_ = ["msg"] def __init__(self, msg): self.msg = msg diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -195,19 +195,23 @@ return symbol * (self.max_stack_depth - self.remaining_stack_depth) class ReturnFromTopLevel(Exception): + _attrs_ = ["object"] def __init__(self, object): self.object = object class StackOverflow(Exception): + _attrs_ = ["s_context"] def __init__(self, s_top_context): self.s_context = s_top_context class Return(Exception): + _attrs_ = ["value", "s_target_context"] def __init__(self, object, s_context): self.value = object self.s_target_context = s_context class ProcessSwitch(Exception): + _attrs_ = ["s_new_context"] def __init__(self, s_context): self.s_new_context = s_context diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -51,8 +51,9 @@ def wrapped(*c_arguments): assert len_unwrap_spec == len(c_arguments) args = () - if IProxy.interp.trace_proxy: + if IProxy.trace_proxy: print 'Called InterpreterProxy >> %s' % func.func_name, + assert IProxy.s_frame is not None and IProxy.space is not None and IProxy.interp is not None try: for i, spec in unrolling_unwrap_spec: c_arg = c_arguments[i] @@ -63,7 +64,7 @@ else: args += (c_arg, ) result = func(*args) - if IProxy.interp.trace_proxy: + if IProxy.trace_proxy: print '\t-> %s' % result if result_type is oop: assert isinstance(result, model.W_Object) @@ -990,6 +991,7 @@ def __init__(self): self.vm_proxy = lltype.nullptr(VMPtr.TO) self.vm_initialized = False + self.space = None self._next_oop = 0 self.oop_map = {} self.object_map = {} @@ -1003,6 +1005,7 @@ self.argcount = 0 self.s_method = None self.fail_reason = 0 + self.trace_proxy = False def call(self, signature, interp, s_frame, argcount, s_method): self.initialize_from_call(signature, interp, s_frame, argcount, s_method) @@ -1040,6 +1043,7 @@ self.argcount = argcount self.s_method = s_method self.space = interp.space + 
self.trace_proxy = interp.trace_proxy # ensure that space.w_nil gets the first possible oop self.object_to_oop(self.space.w_nil) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1074,6 +1074,8 @@ """ _immutable_fields_ = ["_shadow?"] + _attrs_ = ["bytes", "_likely_methodname", "header", "argsize", "primitive", + "literals", "tempsize", "literalsize", "islarge", "_shadow"] ### Extension from Squeak 3.9 doc, which we do not implement: ### trailer (variable) ### The trailer has two variant formats. In the first variant, the last diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -17,7 +17,7 @@ """A shadow is an optional extra bit of information that can be attached at run-time to any Smalltalk object. """ - _attr_ = ['_w_self'] + _attrs_ = ['_w_self', 'space'] def __init__(self, space, w_self): self.space = space @@ -37,7 +37,7 @@ class AbstractCachingShadow(AbstractShadow): _immutable_fields_ = ['version?'] - _attr_ = [] + _attrs_ = ['version'] def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) @@ -85,7 +85,7 @@ (i.e. used as the class of another Smalltalk object). """ - _attr_ = ["name", "_instance_size", "instance_varsized", "instance_kind", + _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] def __init__(self, space, w_self): @@ -345,7 +345,7 @@ class MethodDictionaryShadow(AbstractShadow): _immutable_fields_ = ['invalid?', 's_class'] - _attr_ = ['methoddict'] + _attrs_ = ['methoddict', 'invalid', 's_class'] def __init__(self, space, w_self): self.invalid = True @@ -398,7 +398,7 @@ class AbstractRedirectingShadow(AbstractShadow): - _attr_ = ['_w_self_size'] + _attrs_ = ['_w_self_size'] def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) @@ -440,7 +440,7 @@ class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype - _attr_ = ['_s_sender', '_pc', '_temps_and_stack', + _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] _virtualizable2_ = [ @@ -734,7 +734,7 @@ class BlockContextShadow(ContextPartShadow): - _attr_ = ['_w_home', '_initialip', '_eargc'] + _attrs_ = ['_w_home', '_initialip', '_eargc'] @staticmethod def make_context(space, w_home, s_sender, argcnt, initialip): @@ -840,7 +840,7 @@ ) class MethodContextShadow(ContextPartShadow): - _attr_ = ['w_closure_or_nil', '_w_receiver', '_w_method'] + _attrs_ = ['w_closure_or_nil', '_w_receiver', '_w_method'] def __init__(self, space, w_self): self.w_closure_or_nil = space.w_nil @@ -1005,11 +1005,11 @@ return '%s%s' % (block, self.w_method().get_identifier_string()) class CompiledMethodShadow(object): - _attr_ = ["_w_self", "bytecode", - "literals[*]", "bytecodeoffset", - "literalsize", "tempsize", "primitive", + _attrs_ = ["_w_self", "bytecode", + "literals", "bytecodeoffset", + "literalsize", "_tempsize", "_primitive", "argsize", "islarge", - "w_compiledin"] + "w_compiledin", "version"] _immutable_fields_ = ["version?", "_w_self"] def __init__(self, w_compiledmethod): @@ -1038,13 +1038,13 @@ w_compiledmethod = self._w_self self.version = Version() self.bytecode = "".join(w_compiledmethod.bytes) - self.literals = w_compiledmethod.literals self.bytecodeoffset = w_compiledmethod.bytecodeoffset() self.literalsize = w_compiledmethod.getliteralsize() self._tempsize = w_compiledmethod.gettempsize() self._primitive = w_compiledmethod.primitive self.argsize = w_compiledmethod.argsize 
self.islarge = w_compiledmethod.islarge + self.literals = w_compiledmethod.literals self.w_compiledin = None if self.literals: @@ -1098,7 +1098,7 @@ class ObserveeShadow(AbstractShadow): - _attr_ = ['dependent'] + _attrs_ = ['dependent'] def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) self.dependent = None From noreply at buildbot.pypy.org Mon Jul 15 20:27:58 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 15 Jul 2013 20:27:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: readded RPython BitBlt code Message-ID: <20130715182758.A28B41C101D@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r498:a7da58655da8 Date: 2013-07-15 20:27 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a7da58655da8/ Log: readded RPython BitBlt code diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -92,6 +92,12 @@ def fillin(self, space, g_self): raise NotImplementedError() + def getword(self, n0): + raise NotImplementedError() + + def setword(self, n0, r_uint_value): + raise NotImplementedError() + def invariant(self): return True @@ -594,6 +600,14 @@ from spyvm.shadow import ObserveeShadow return self.as_special_get_shadow(space, ObserveeShadow) + def as_bitblt_get_shadow(self, space): + from spyvm.shadow import BitBltShadow + return self.as_special_get_shadow(space, BitBltShadow) + + def as_form_get_shadow(self, space): + from spyvm.shadow import FormShadow + return self.as_special_get_shadow(space, FormShadow) + def has_shadow(self): return self._shadow is not None diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -609,20 +609,48 @@ combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError + + space = interp.space + import time - space = interp.space + start = time.time() + print "blitting" + + # See BlueBook p.356ff + s_bitblt = w_rcvr.as_bitblt_get_shadow(space) + s_bitblt.sync_cache() + s_bitblt.clip_range() + if s_bitblt.w <= 0 or s_bitblt.h <= 0: + return w_rcvr # null range + s_bitblt.compute_masks() + s_bitblt.check_overlap() + s_bitblt.calculate_offsets() try: - s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) - except Return: - w_dest_form = w_rcvr.fetch(space, 0) - if w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() + s_bitblt.copy_loop() + except IndexError: + raise PrimitiveFailedError() - # in case we return normally, we have to restore the removed w_rcvr + w_dest_form = w_rcvr.fetch(space, 0) + if w_dest_form.is_same_object(space.objtable['w_display']): + w_bitmap = w_dest_form.fetch(space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + + print "blitting finshed after %d ms" % int((time.time() - start) * 1000) return w_rcvr + # try: + # s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) + # except Return: + # w_dest_form = w_rcvr.fetch(space, 0) + # if w_dest_form.is_same_object(space.objtable['w_display']): + # w_bitmap = w_dest_form.fetch(space, 0) + # assert isinstance(w_bitmap, model.W_DisplayBitmap) + # w_bitmap.flush_to_screen() + + # # in case we return normally, we have to restore the removed w_rcvr + # return w_rcvr + @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): if not (0 <= argcount <= 1): 
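The primitive above only orchestrates the steps (clip, compute masks, check
overlap, copy); the per-word combination itself is done by BitBltShadow.merge()
in the shadow.py hunk below.  As a rough illustration (plain Python, not code
from this change-set), a word-wise raster operation for a few of the Smalltalk
combination rules looks like this:

    def merge_words(rule, source_word, dest_word):
        # rule numbering follows BitBlt's merge() below
        if rule == 0:
            return 0
        elif rule == 1:              # source AND destination
            return source_word & dest_word
        elif rule == 3:              # plain copy of the source
            return source_word
        elif rule == 6:              # source XOR destination
            return source_word ^ dest_word
        elif rule == 7:              # source OR destination
            return source_word | dest_word
        raise NotImplementedError(rule)

    assert merge_words(7, 0x0f0f0f0f, 0x00ff00ff) == 0x0fff0fff
    assert merge_words(6, 0xffffffff, 0x0000ffff) == 0xffff0000

copy_loop() then stores (merge_mask & merged) | (~merge_mask & dest) for each
destination word, so that only the pixels selected by mask1/mask2 are touched.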
diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1113,3 +1113,288 @@ self.dependent = dependent def update(self): pass + + +class BitBltShadow(AbstractCachingShadow): + _attrs_ = [# From BitBlt + "dest_form", "source_form", "halftone_form", + "combination_rule", "dest_x", "dest_y", "width", + "height", "source_x", "source_y", "clip_x", "clip_y", + "clip_width", "clip_height", "color_map", + # From BitBltSimulation + "w", "h", "sx", "sy", "dx", "dy", + "dest_bits", "dest_raster", "source_bits", "source_raster", + "halftone_bits", "skew", "mask1", "mask2", "skew_mask", + "n_words", "h_dir", "v_dir", "preload", "source_index", + "dest_index", "source_delta", "dest_delta"] + + WordSize = 32 + RightMasks = [rarithmetic.r_uint(1)] + for i in xrange(WordSize): + RightMasks.append(rarithmetic.r_uint((2 ** (i + 2)) - 1)) + AllOnes = rarithmetic.r_uint((2 ** WordSize) - 1) + + def sync_cache(self): + try: + w_form = self.fetch(0) + assert isinstance(w_form, model.W_PointersObject) + s_form = w_form.as_form_get_shadow(self.space) + assert isinstance(s_form, FormShadow) + self.dest_form = s_form + except error.PrimitiveFailedError, e: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise e + w_source_form = self.fetch(1) + if w_source_form is self.space.w_nil: + self.source_form = None + else: + try: + w_form = w_source_form + assert isinstance(w_form, model.W_PointersObject) + s_form = w_form.as_form_get_shadow(self.space) + assert isinstance(s_form, FormShadow) + self.source_form = s_form + except error.PrimitiveFailedError, e: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise e + w_halftone_form = self.fetch(2) + if w_halftone_form is not self.space.w_nil: + if isinstance(w_halftone_form, model.W_WordsObject): + # Already a bitmap + self.halftone_bits = w_halftone_form.words + else: + assert isinstance(w_halftone_form, model.W_PointersObject) + w_bits = w_halftone_form.as_form_get_shadow(self.space).w_bits + assert isinstance(w_bits, model.W_WordsObject) + self.halftone_bits = w_bits.words + else: + self.halftone_bits = None + self.combination_rule = self.space.unwrap_int(self.fetch(3)) + self.dest_x = self.space.unwrap_int(self.fetch(4)) - 1 + self.dest_y = self.space.unwrap_int(self.fetch(5)) - 1 + self.width = self.space.unwrap_int(self.fetch(6)) + self.height = self.space.unwrap_int(self.fetch(7)) + self.source_x = self.space.unwrap_int(self.fetch(8)) - 1 + self.source_y = self.space.unwrap_int(self.fetch(9)) - 1 + self.clip_x = self.space.unwrap_int(self.fetch(10)) - 1 + self.clip_y = self.space.unwrap_int(self.fetch(11)) - 1 + self.clip_width = self.space.unwrap_int(self.fetch(12)) + self.clip_height = self.space.unwrap_int(self.fetch(13)) + self.color_map = self.fetch(14) + + def clip_range(self): + if self.dest_x >= self.clip_x: + self.sx = self.source_x + self.dx = self.dest_x + self.w = self.width + else: + self.sx = self.source_x + (self.clip_x - self.dest_x) + self.w = self.width - (self.clip_x - self.dest_x) + self.dx = self.clip_x + if self.dx + self.w > self.clip_x + self.clip_width: + self.w = self.w - (self.dx + self.w - (self.clip_x + self.clip_width)) + if self.dest_y >= self.clip_y: + self.sy = self.source_y + self.dy = self.dest_y + self.h = self.height + else: + self.sy = self.source_y + self.clip_y - self.dest_y + self.h = self.height - self.clip_y - self.dest_y + self.dy = self.clip_y + if self.dy + self.h > 
self.clip_y + self.clip_height: + self.h = self.h - (self.dy + self.h - (self.clip_y + self.clip_height)) + if self.source_form is None: + return + if self.sx < 0: + self.dx = self.dx - self.sx + self.w = self.w + self.sx + self.sx = 0 + if self.sx + self.w > self.source_form.width: + self.w = self.w - (self.sx + self.w - self.source_form.width) + if self.sy < 0: + self.dy = self.dy - self.sy + self.h = self.h + self.sy + self.sy = 0 + if self.sy + self.h > self.source_form.height: + self.h = self.h - (self.sy + self.h - self.source_form.height) + + def compute_masks(self): + self.dest_bits = self.dest_form.w_bits + self.dest_raster = (self.dest_form.width - 1) // BitBltShadow.WordSize + 1 + if self.source_form is not None: + self.source_bits = self.source_form.w_bits + self.source_raster = (self.source_form.width - 1) // BitBltShadow.WordSize + 1 + else: + self.source_bits = None + self.source_raster = 0 + # Halftone form is set during synchronization + self.skew = (self.sx - self.dx) & (BitBltShadow.WordSize - 1) + start_bits = BitBltShadow.WordSize - (self.dx & (BitBltShadow.WordSize - 1)) + self.mask1 = BitBltShadow.RightMasks[start_bits] + end_bits = (BitBltShadow.WordSize - 1) - ((self.dx + self.w - 1) & (BitBltShadow.WordSize - 1)) + self.mask2 = ~BitBltShadow.RightMasks[end_bits] + if self.skew == 0: + self.skew_mask = rarithmetic.r_uint(0) + else: + self.skew_mask = BitBltShadow.RightMasks[BitBltShadow.WordSize - self.skew] + if self.w < start_bits: + self.mask1 = self.mask1 & self.mask2 + self.mask2 = rarithmetic.r_uint(0) + self.n_words = 1 + else: + self.n_words = (self.w - start_bits - 1) // BitBltShadow.WordSize + 2 + + def check_overlap(self): + self.h_dir = 1 + self.v_dir = 1 + if (self.source_form is not None and + self.source_form.w_self().is_same_object(self.dest_form.w_self()) and + self.dy >= self.sy): + if self.dy > self.sy: + self.v_dir = -1 + self.sy = self.sy + self.h - 1 + self.dy = self.dy + self.h - 1 + elif self.dx > self.sx: + self.h_dir = -1 + self.sx = self.sx + self.w - 1 + self.dx = self.dx + self.w - 1 + self.skew_mask = ~self.skew_mask + assert isinstance(self.mask2, rarithmetic.r_uint) + self.mask1, self.mask2 = self.mask2, self.mask1 + + def calculate_offsets(self): + self.preload = (self.source_form is not None and ( + self.skew_mask != 0 and + self.skew <= (self.sx & (BitBltShadow.WordSize - 1)))) + if self.h_dir < 0: + self.preload = not self.preload + self.source_index = self.sy * self.source_raster + self.sx // BitBltShadow.WordSize + self.dest_index = self.dy * self.dest_raster + self.dx // BitBltShadow.WordSize + self.source_delta = ((self.source_raster * + self.v_dir - + (self.n_words + (1 if self.preload else 0))) * + self.h_dir) + self.dest_delta = self.dest_raster * self.v_dir - self.n_words * self.h_dir + + def copy_loop(self): + space = self.space + no_skew_mask = ~self.skew_mask + for i in xrange(self.h): + if self.halftone_bits: + halftone_word = self.halftone_bits[self.dy % len(self.halftone_bits)] + self.dy = self.dy + self.v_dir + else: + halftone_word = BitBltShadow.AllOnes + skew_word = halftone_word + if self.preload: + prev_word = self.source_bits.getword(self.source_index) + self.source_index = self.source_index + self.h_dir + else: + prev_word = 0 + merge_mask = self.mask1 + for word in xrange(self.n_words): + if self.source_form is not None: + prev_word = prev_word & self.skew_mask + try: + this_word = self.source_bits.getword(self.source_index) + except IndexError: + this_word = self.source_bits.getword(0) + skew_word = 
prev_word | (this_word & no_skew_mask) + prev_word = this_word + skew_word = (self.bit_shift(skew_word, self.skew) | + self.bit_shift(skew_word, self.skew - 16)) + merge_word = rarithmetic.r_uint(self.merge( + skew_word & halftone_word, + self.dest_bits.getword(self.dest_index) + )) + __new = ( + (merge_mask & merge_word) | + (~merge_mask & self.dest_bits.getword(self.dest_index)) + ) + self.dest_bits.setword(self.dest_index, __new) + self.source_index = self.source_index + self.h_dir + self.dest_index = self.dest_index + self.h_dir + if word == (self.n_words - 1): + merge_mask = self.mask2 + else: + merge_mask = BitBltShadow.AllOnes + self.source_index = self.source_index + self.source_delta + self.dest_index = self.dest_index + self.dest_delta + + def bit_shift(self, target, amount): + if amount > 0: + return (rarithmetic.r_uint(target) << amount) & BitBltShadow.AllOnes + else: + return (rarithmetic.r_uint(target) >> -amount) & BitBltShadow.AllOnes + + def merge(self, source_word, dest_word): + if self.combination_rule == 0: + return 0 + elif self.combination_rule == 1: + return source_word & dest_word + elif self.combination_rule == 2: + return source_word & ~dest_word + elif self.combination_rule == 3: + return source_word + elif self.combination_rule == 4: + return ~source_word & dest_word + elif self.combination_rule == 5: + return dest_word + elif self.combination_rule == 6: + return source_word ^ dest_word + elif self.combination_rule == 7: + return source_word | dest_word + elif self.combination_rule == 8: + return ~source_word & ~dest_word + elif self.combination_rule == 9: + return ~source_word ^ dest_word + elif self.combination_rule == 10: + return ~dest_word + elif self.combination_rule == 11: + return source_word | ~dest_word + elif self.combination_rule == 12: + return ~source_word + elif self.combination_rule == 13: + return ~source_word | dest_word + elif self.combination_rule == 14: + return ~source_word | ~dest_word + elif self.combination_rule == 15: + return dest_word & BitBltShadow.AllOnes + else: + raise error.PrimitiveFailedError() + + +class FormShadow(AbstractCachingShadow): + _attrs_ = ["w_bits", "width", "height", "depth", "offset_x", "offset_y"] + + def sync_cache(self): + self.w_bits = self.fetch(0) + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise error.PrimitiveFailedError + self.width = self.space.unwrap_int(self.fetch(1)) + self.height = self.space.unwrap_int(self.fetch(2)) + self.depth = self.space.unwrap_int(self.fetch(3)) + w_offset = self.fetch(4) + assert isinstance(w_offset, model.W_PointersObject) + if not w_offset is self.space.w_nil: + self.offset_x = self.space.unwrap_int(w_offset._fetch(0)) - 1 + self.offset_y = self.space.unwrap_int(w_offset._fetch(1)) - 1 + + # def replace_bits(self): + # w_bits = self.w_bits + # if isinstance(w_bits, model.W_WordsObject): + # pass + # elif isinstance(w_bits, model.W_DisplayBitmap): + # w_bits.update_from_buffer() + # else: + # w_self = self.w_self() + # assert isinstance(w_self, model.W_PointersObject) + # w_self._shadow = None + # raise error.PrimitiveFailedError \ No newline at end of file From noreply at buildbot.pypy.org Mon Jul 15 23:21:00 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 15 Jul 2013 23:21:00 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: hack till zjit tests do not fail Message-ID: 
<20130715212100.0E81B1C0205@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65407:372ae0ab14c5 Date: 2013-07-16 00:19 +0300 http://bitbucket.org/pypy/pypy/changeset/372ae0ab14c5/ Log: hack till zjit tests do not fail diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -16,6 +16,7 @@ else: w_ret = space.allocate_instance(W_NDimArray, w_cls) W_NDimArray.__init__(w_ret, impl) + assert isinstance(w_ret, W_NDimArray) space.call_method(w_ret, '__array_finalize__', w_instance) return w_ret diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -192,29 +192,28 @@ return w_obj is w_what def issubtype(self, w_type1, w_type2): - if not w_type2: - return self.wrap(False) - return self.wrap(issubclass(w_type1, w_type2)) + return BoolObject(True) def type(self, w_obj): + if self.is_none(w_obj): + return self.w_None try: return w_obj.tp except AttributeError: if isinstance(w_obj, W_NDimArray): return W_NDimArray - if issubclass(w_obj, W_NDimArray): - return W_NDimArray - return None + return self.w_None def gettypefor(self, w_obj): - return self.type(w_obj) + return None def call_function(self, tp, w_dtype): return w_dtype - def call_method(self, w_obj, s, *args, **kwargs): - # XXX hack - return getattr(w_obj, 'descr_' + s)(self, *args, **kwargs) + def call_method(self, w_obj, s, *args): + # XXX even the hacks have hacks + return None + #return getattr(w_obj, 'descr_' + s)(self, *args) @specialize.arg(1) def interp_w(self, tp, what): @@ -222,9 +221,7 @@ return what def allocate_instance(self, klass, w_subtype): - inst = instantiate(klass) - inst.tp = klass - return inst + return instantiate(klass) def newtuple(self, list_w): return ListObject(list_w) @@ -714,7 +711,7 @@ elif token.v.strip(' ') == 'float': stack.append(DtypeClass('float')) else: - stack.append(Variable(token.v)) + stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': stack.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'operator': From noreply at buildbot.pypy.org Tue Jul 16 01:38:51 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 16 Jul 2013 01:38:51 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20130715233851.711E11C1464@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65408:dedb7f16dcd3 Date: 2013-07-11 12:38 -0700 http://bitbucket.org/pypy/pypy/changeset/dedb7f16dcd3/ Log: 2to3 diff --git a/pypy/module/_locale/test/test_locale.py b/pypy/module/_locale/test/test_locale.py --- a/pypy/module/_locale/test/test_locale.py +++ b/pypy/module/_locale/test/test_locale.py @@ -159,11 +159,11 @@ _locale.setlocale(_locale.LC_ALL, self.language_pl) assert _locale.strcoll("a", "b") < 0 assert _locale.strcoll( - u"\N{LATIN SMALL LETTER A WITH OGONEK}", + "\N{LATIN SMALL LETTER A WITH OGONEK}", "b") < 0 assert _locale.strcoll( - u"\N{LATIN SMALL LETTER C WITH ACUTE}", + "\N{LATIN SMALL LETTER C WITH ACUTE}", "b") > 0 assert _locale.strcoll("c", "b") > 0 From noreply at buildbot.pypy.org Tue Jul 16 01:38:53 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 16 Jul 2013 01:38:53 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130715233853.3EF411C1464@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65409:87a22aa29968 Date: 2013-07-11 16:07 
-0700 http://bitbucket.org/pypy/pypy/changeset/87a22aa29968/ Log: merge default diff too long, truncating to 2000 out of 2420 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst --- a/pypy/doc/release-2.1.0-beta1.rst 
+++ b/pypy/doc/release-2.1.0-beta1.rst @@ -22,6 +22,8 @@ * Stacklet support on ARM +* Interpreter improvements + * Various numpy improvements * Bugfixes to cffi and ctypes diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,7 @@ .. branch: improve-str2charp Improve the performance of I/O writing up to 15% by using memcpy instead of copying char-by-char in str2charp and get_nonmovingbuffer + +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -376,7 +376,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -247,6 +247,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -511,7 +511,7 @@ option_ptr = rffi.cast(rffi.INTP, value_ptr) option_ptr[0] = space.int_w(w_option) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) + w_onoff, w_time, w_interval = space.unpackiterable(w_option, 3) option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) option_ptr.c_onoff = space.uint_w(w_onoff) option_ptr.c_keepalivetime = space.uint_w(w_time) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -105,7 +105,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.fsencode_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -760,6 +760,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + def test_source_encoding(self): import imp import encoded diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -318,7 +318,7 @@ if not base.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape,]) 
else: - w_fldname, w_flddesc = space.fixedview(w_elem) + w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) if fldname in fields: diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -28,6 +28,9 @@ 'pypy-c-jit-linux-x86-64', 'pypy-c-jit-macosx-x86-64', 'pypy-c-jit-win-x86-32', + 'build-pypy-c-jit-linux-armhf-raring', + 'build-pypy-c-jit-linux-armhf-raspbian', + 'build-pypy-c-jit-linux-armel', ] def main(): diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. @@ -46,7 +46,8 @@ os.system("chmod -R g-w %s" % basedir) def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', - copy_to_dir = None, override_pypy_c = None, nostrip=False): + copy_to_dir=None, override_pypy_c=None, nostrip=False, + withouttk=False): basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' @@ -70,6 +71,14 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. 
+You can either install Tk development headers package or +add --without-tk option to skip packaging binary CFFI extension.""" + sys.exit(1) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -183,14 +192,28 @@ print "Ready in %s" % (builddir,) return builddir # for tests + +def print_usage(): + print >>sys.stderr, __doc__ + sys.exit(1) + + if __name__ == '__main__': if len(sys.argv) == 1: - print >>sys.stderr, __doc__ - sys.exit(1) - else: - args = sys.argv[1:] - kw = {} - if args[0] == '--nostrip': + print_usage() + + args = sys.argv[1:] + kw = {} + + for i, arg in enumerate(args): + if arg == '--nostrip': kw['nostrip'] = True - args = args[1:] - package(*args, **kw) + elif arg == '--without-tk': + kw['withouttk'] = True + elif not arg.startswith('--'): + break + else: + print_usage() + + args = args[i:] + package(*args, **kw) diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -170,7 +170,6 @@ cmdline="--make-jobs", default=detect_number_of_processors()), # Flags of the TranslationContext: - BoolOption("simplifying", "Simplify flow graphs", default=True), BoolOption("list_comprehension_operations", "When true, look for and special-case the sequence of " "operations that results from a list comprehension and " diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -14,7 +14,7 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) -from rpython.flowspace.operation import implicit_exceptions +from rpython.flowspace.operation import op class FlowingError(Exception): @@ -55,25 +55,44 @@ pass class SpamBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "dead framestate".split() - def __init__(self, framestate): Block.__init__(self, framestate.getvariables()) self.framestate = framestate self.dead = False + def make_recorder(self): + return BlockRecorder(self) + class EggBlock(Block): - # make slots optional, for debugging - if hasattr(Block, '__slots__'): - __slots__ = "prevblock booloutcome last_exception".split() - def __init__(self, inputargs, prevblock, booloutcome): Block.__init__(self, inputargs) self.prevblock = prevblock self.booloutcome = booloutcome + @property + def ancestor(self): + parent = self.prevblock + while isinstance(parent, EggBlock): + parent = parent.prevblock + return parent + + @property + def dead(self): + return self.ancestor.dead + + @property + def framestate(self): + return self.ancestor.framestate + + def make_recorder(self): + recorder = BlockRecorder(self) + curr = self + while isinstance(curr, EggBlock): + prev = curr.prevblock + recorder = Replayer(prev, curr.booloutcome, recorder) + curr = prev + return recorder + def extravars(self, last_exception=None, last_exc_value=None): self.last_exception = last_exception @@ -209,6 +228,7 @@ w_exc_cls, w_exc_value = egg.inputargs[-2:] if isinstance(egg.last_exception, Constant): w_exc_cls = egg.last_exception + assert not isinstance(w_exc_cls.value, list) raise ImplicitOperationError(w_exc_cls, w_exc_value) # ____________________________________________________________ @@ -430,44 +450,23 @@ self.last_instr = state.next_instr self.blockstack = state.blocklist[:] - def recording(self, block): - """ Setup 
recording of the block and return the recorder. """ - parentblocks = [] - parent = block - while isinstance(parent, EggBlock): - parent = parent.prevblock - parentblocks.append(parent) - # parentblocks = [Egg, Egg, ..., Egg, Spam] not including block - if parent.dead: - raise StopFlowing - self.setstate(parent.framestate) - recorder = BlockRecorder(block) - prevblock = block - for parent in parentblocks: - recorder = Replayer(parent, prevblock.booloutcome, recorder) - prevblock = parent - return recorder + def guessbool(self, w_condition, **kwds): + return self.recorder.guessbool(self, w_condition, **kwds) - def record(self, spaceop): - """Record an operation into the active block""" + def do_operation(self, name, *args_w): recorder = self.recorder if getattr(recorder, 'final_state', None) is not None: self.mergeblock(recorder.crnt_block, recorder.final_state) raise StopFlowing - recorder.append(spaceop) - - def guessbool(self, w_condition, **kwds): - return self.recorder.guessbool(self, w_condition, **kwds) - - def do_operation(self, name, *args_w): spaceop = SpaceOperation(name, args_w, Variable()) spaceop.offset = self.last_instr - self.record(spaceop) + recorder.append(spaceop) return spaceop.result def do_operation_with_implicit_exceptions(self, name, *args_w): w_result = self.do_operation(name, *args_w) - self.handle_implicit_exceptions(implicit_exceptions.get(name)) + oper = getattr(op, name) + self.handle_implicit_exceptions(oper.canraise) return w_result def handle_implicit_exceptions(self, exceptions): @@ -488,39 +487,44 @@ self.pendingblocks = collections.deque([graph.startblock]) while self.pendingblocks: block = self.pendingblocks.popleft() - try: - self.recorder = self.recording(block) - while True: - self.last_instr = self.handle_bytecode(self.last_instr) - self.recorder.final_state = self.getstate() + if not block.dead: + self.record_block(block) - except ImplicitOperationError, e: - if isinstance(e.w_type, Constant): - exc_cls = e.w_type.value - else: - exc_cls = Exception - msg = "implicit %s shouldn't occur" % exc_cls.__name__ - w_type = Constant(AssertionError) - w_value = Constant(AssertionError(msg)) - link = Link([w_type, w_value], graph.exceptblock) - self.recorder.crnt_block.closeblock(link) + def record_block(self, block): + self.setstate(block.framestate) + self.recorder = block.make_recorder() + try: + while True: + self.last_instr = self.handle_bytecode(self.last_instr) + self.recorder.final_state = self.getstate() - except FSException, e: - if e.w_type is self.space.w_ImportError: - msg = 'import statement always raises %s' % e - raise ImportError(msg) - link = Link([e.w_type, e.w_value], graph.exceptblock) - self.recorder.crnt_block.closeblock(link) + except ImplicitOperationError, e: + if isinstance(e.w_type, Constant): + exc_cls = e.w_type.value + else: + exc_cls = Exception + msg = "implicit %s shouldn't occur" % exc_cls.__name__ + w_type = Constant(AssertionError) + w_value = Constant(AssertionError(msg)) + link = Link([w_type, w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except StopFlowing: - pass + except FSException, e: + if e.w_type == self.space.w_ImportError: + msg = 'import statement always raises %s' % e + raise ImportError(msg) + link = Link([e.w_type, e.w_value], self.graph.exceptblock) + self.recorder.crnt_block.closeblock(link) - except Return as exc: - w_result = exc.value - link = Link([w_result], graph.returnblock) - self.recorder.crnt_block.closeblock(link) + except StopFlowing: + pass - del self.recorder 
+ except Return as exc: + w_result = exc.value + link = Link([w_result], self.graph.returnblock) + self.recorder.crnt_block.closeblock(link) + + self.recorder = None def mergeblock(self, currentblock, currentstate): next_instr = currentstate.next_instr @@ -661,8 +665,8 @@ self.last_exception = operr raise operr else: - raise FSException(space.w_TypeError, - space.wrap("raise: no active exception to re-raise")) + raise space.exc_wrap(TypeError( + "raise: no active exception to re-raise")) w_value = space.w_None if nbargs >= 3: diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -3,6 +3,7 @@ # # the below object/attribute model evolved from # a discussion in Berlin, 4th of october 2003 +import types import py from rpython.tool.uid import uid, Hashable @@ -261,6 +262,7 @@ dummyname = 'v' namesdict = {dummyname : (dummyname, 0)} + @property def name(self): _name = self._name _nr = self._nr @@ -270,11 +272,10 @@ _nr = self._nr = nd[_name][1] nd[_name] = (_name, _nr + 1) return "%s%d" % (_name, _nr) - name = property(name) + @property def renamed(self): return self._name is not self.dummyname - renamed = property(renamed) def __init__(self, name=None): self._name = self.dummyname @@ -314,6 +315,9 @@ self._name = intern(name) self._nr = nr + def foldable(self): + return False + class Constant(Hashable): __slots__ = ["concretetype"] @@ -323,6 +327,25 @@ if concretetype is not None: self.concretetype = concretetype + def foldable(self): + to_check = self.value + if hasattr(to_check, 'im_self'): + to_check = to_check.im_self + if isinstance(to_check, (type, types.ClassType, types.ModuleType)): + # classes/types/modules are assumed immutable + return True + if (hasattr(to_check, '__class__') and + to_check.__class__.__module__ == '__builtin__'): + # builtin object + return True + # User-created instance + if hasattr(to_check, '_freeze_'): + assert to_check._freeze_() is True + return True + else: + # cannot count on it not mutating at runtime! 
+ return False + class UnwrapException(Exception): """Attempted to unwrap a Variable.""" diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -19,7 +19,7 @@ from rpython.flowspace.pygraph import PyGraph from rpython.flowspace.specialcase import SPECIAL_CASES from rpython.rlib.unroll import unrolling_iterable, _unroller -from rpython.rlib import rstackovf, rarithmetic +from rpython.rlib import rstackovf from rpython.rlib.rarithmetic import is_valid_int @@ -45,6 +45,16 @@ } } +# built-ins that can always raise exceptions +builtins_exceptions = { + int: [ValueError], + float: [ValueError], + chr: [ValueError], + unichr: [ValueError], + unicode: [UnicodeDecodeError], +} + + def _assert_rpythonic(func): """Raise ValueError if ``func`` is obviously not RPython""" if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'): @@ -135,6 +145,11 @@ raise WrapException return Constant(obj) + def exc_wrap(self, exc): + w_value = self.wrap(exc) + w_type = self.wrap(type(exc)) + return FSException(w_type, w_value) + def int_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -143,15 +158,6 @@ return val return self.unwrap(w_obj) - def uint_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not rarithmetic.r_uint: - raise TypeError("expected unsigned: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - - def str_w(self, w_obj): if isinstance(w_obj, Constant): val = w_obj.value @@ -160,14 +166,6 @@ return val return self.unwrap(w_obj) - def float_w(self, w_obj): - if isinstance(w_obj, Constant): - val = w_obj.value - if type(val) is not float: - raise TypeError("expected float: " + repr(w_obj)) - return val - return self.unwrap(w_obj) - def unwrap(self, w_obj): if isinstance(w_obj, Variable): raise UnwrapException @@ -176,40 +174,9 @@ else: raise TypeError("not wrapped: " + repr(w_obj)) - def unwrap_for_computation(self, w_obj): - obj = self.unwrap(w_obj) - to_check = obj - if hasattr(to_check, 'im_self'): - to_check = to_check.im_self - if (not isinstance(to_check, (type, types.ClassType, types.ModuleType)) and - # classes/types/modules are assumed immutable - hasattr(to_check, '__class__') and to_check.__class__.__module__ != '__builtin__'): - frozen = hasattr(to_check, '_freeze_') - if frozen: - assert to_check._freeze_() is True - else: - # cannot count on it not mutating at runtime! - raise UnwrapException - return obj - def exception_issubclass_w(self, w_cls1, w_cls2): return self.is_true(self.issubtype(w_cls1, w_cls2)) - def _exception_match(self, w_exc_type, w_check_class): - """Helper for exception_match - - Handles the base case where w_check_class is a constant exception - type. 
- """ - if self.is_w(w_exc_type, w_check_class): - return True # fast path (also here to handle string exceptions) - try: - return self.exception_issubclass_w(w_exc_type, w_check_class) - except FSException, e: - if e.match(self, self.w_TypeError): # string exceptions maybe - return False - raise - def exception_match(self, w_exc_type, w_check_class): """Checks if the given exception type matches 'w_check_class'.""" try: @@ -221,47 +188,43 @@ "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case - return self._exception_match(w_exc_type, w_check_class) + return self.exception_issubclass_w(w_exc_type, w_check_class) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: w_real_class = self.wrap(rstackovf._StackOverflow) - return self._exception_match(w_exc_type, w_real_class) + return self.exception_issubclass_w(w_exc_type, w_real_class) # checking a tuple of classes for w_klass in self.unpackiterable(w_check_class): if self.exception_match(w_exc_type, w_klass): return True return False - def exc_from_raise(self, w_type, w_value): + def exc_from_raise(self, w_arg1, w_arg2): """ Create a wrapped exception from the arguments of a raise statement. Returns an FSException object whose w_value is an instance of w_type. """ - if self.isinstance_w(w_type, self.w_type): + if self.isinstance_w(w_arg1, self.w_type): # this is for all cases of the form (Class, something) - if self.is_w(w_value, self.w_None): + if self.is_w(w_arg2, self.w_None): # raise Type: we assume we have to instantiate Type - w_value = self.call_function(w_type) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1) else: - w_valuetype = self.type(w_value) - if self.exception_issubclass_w(w_valuetype, w_type): + w_valuetype = self.type(w_arg2) + if self.exception_issubclass_w(w_valuetype, w_arg1): # raise Type, Instance: let etype be the exact type of value - w_type = w_valuetype + w_value = w_arg2 else: # raise Type, X: assume X is the constructor argument - w_value = self.call_function(w_type, w_value) - w_type = self.type(w_value) + w_value = self.call_function(w_arg1, w_arg2) else: # the only case left here is (inst, None), from a 'raise inst'. 
- w_inst = w_type - w_instclass = self.type(w_inst) - if not self.is_w(w_value, self.w_None): - raise FSException(self.w_TypeError, self.wrap( + if not self.is_w(w_arg2, self.w_None): + raise self.exc_wrap(TypeError( "instance exception may not have a separate value")) - w_value = w_inst - w_type = w_instclass + w_value = w_arg1 + w_type = self.type(w_value) return FSException(w_type, w_value) def unpackiterable(self, w_iterable): @@ -291,12 +254,8 @@ return self.wrap(not self.is_true(w_obj)) def is_true(self, w_obj): - try: - obj = self.unwrap_for_computation(w_obj) - except UnwrapException: - pass - else: - return bool(obj) + if w_obj.foldable(): + return bool(w_obj.value) w_truthvalue = self.frame.do_operation('is_true', w_obj) return self.frame.guessbool(w_truthvalue) @@ -316,7 +275,7 @@ try: v, next_unroller = it.step() except IndexError: - raise FSException(self.w_StopIteration, self.w_None) + raise self.exc_wrap(StopIteration()) else: frame.replace_in_stack(it, next_unroller) return self.wrap(v) @@ -324,16 +283,6 @@ frame.handle_implicit_exceptions([StopIteration, RuntimeError]) return w_item - def setitem(self, w_obj, w_key, w_val): - # protect us from globals write access - if w_obj is self.frame.w_globals: - raise FlowingError(self.frame, - "Attempting to modify global variable %r." % (w_key)) - return self.frame.do_operation_with_implicit_exceptions('setitem', - w_obj, w_key, w_val) - - def setitem_str(self, w_obj, key, w_value): - return self.setitem(w_obj, self.wrap(key), w_value) def getattr(self, w_obj, w_name): # handling special things like sys @@ -343,12 +292,8 @@ if w_name not in const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', w_obj, w_name) - try: - obj = self.unwrap_for_computation(w_obj) - name = self.unwrap_for_computation(w_name) - except UnwrapException: - pass - else: + if w_obj.foldable() and w_name.foldable(): + obj, name = w_obj.value, w_name.value try: result = getattr(obj, name) except Exception, e: @@ -369,8 +314,8 @@ def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: mod = __import__(name, glob, loc, frm, level) - except ImportError, e: - raise FSException(self.w_ImportError, self.wrap(str(e))) + except ImportError as e: + raise self.exc_wrap(e) return self.wrap(mod) def import_from(self, w_module, w_name): @@ -378,15 +323,15 @@ assert isinstance(w_name, Constant) # handle sys if w_module in self.not_really_const: - const_w = self.not_really_const[w_obj] + const_w = self.not_really_const[w_module] if w_name not in const_w: return self.frame.do_operation_with_implicit_exceptions('getattr', - w_obj, w_name) + w_module, w_name) try: return self.wrap(getattr(w_module.value, w_name.value)) except AttributeError: - raise FSException(self.w_ImportError, - self.wrap("cannot import name '%s'" % w_name.value)) + raise self.exc_wrap(ImportError( + "cannot import name '%s'" % w_name.value)) def call_method(self, w_obj, methname, *arg_w): w_meth = self.getattr(w_obj, self.wrap(methname)) @@ -417,7 +362,7 @@ args_w = args.arguments_w + self.unpackiterable(args.w_stararg) else: args_w = args.arguments_w - return sc(self, fn, args_w) + return sc(self, args_w) if args.keywords or isinstance(args.w_stararg, Variable): shape, args_w = args.flatten() @@ -430,15 +375,6 @@ args_w = args.arguments_w w_res = self.frame.do_operation('simple_call', w_callable, *args_w) - # maybe the call has generated an exception (any one) - # but, let's say, not if we are calling a built-in class or function - # because this gets in the way of 
the special-casing of - # - # raise SomeError(x) - # - # as shown by test_objspace.test_raise3. - - exceptions = [Exception] # *any* exception by default if isinstance(w_callable, Constant): c = w_callable.value if (isinstance(c, (types.BuiltinFunctionType, @@ -446,8 +382,11 @@ types.ClassType, types.TypeType)) and c.__module__ in ['__builtin__', 'exceptions']): - exceptions = operation.implicit_exceptions.get(c) - self.frame.handle_implicit_exceptions(exceptions) + if c in builtins_exceptions: + self.frame.handle_implicit_exceptions(builtins_exceptions[c]) + return w_res + # *any* exception for non-builtins + self.frame.handle_implicit_exceptions([Exception]) return w_res def find_global(self, w_globals, varname): @@ -462,82 +401,61 @@ raise FlowingError(self.frame, self.wrap(message)) return self.wrap(value) -def make_op(name, arity): +def make_impure_op(oper): + def generic_operator(self, *args_w): + if len(args_w) != oper.arity: + raise TypeError(oper.name + " got the wrong number of arguments") + w_result = self.frame.do_operation_with_implicit_exceptions(oper.name, *args_w) + return w_result + return generic_operator + +def make_op(oper): """Add function operation to the flow space.""" - if getattr(FlowObjSpace, name, None) is not None: - return - - op = None - skip = False - arithmetic = False - - if (name.startswith('del') or - name.startswith('set') or - name.startswith('inplace_')): - # skip potential mutators - skip = True - elif name in ('id', 'hash', 'iter', 'userdel'): - # skip potential runtime context dependecies - skip = True - elif name in ('repr', 'str'): - rep = getattr(__builtin__, name) - def op(obj): - s = rep(obj) - if "at 0x" in s: - print >>sys.stderr, "Warning: captured address may be awkward" - return s - else: - op = operation.FunctionByName[name] - arithmetic = (name + '_ovf') in operation.FunctionByName - - if not op and not skip: - raise ValueError("XXX missing operator: %s" % (name,)) + name = oper.name + func = oper.pyfunc def generic_operator(self, *args_w): - assert len(args_w) == arity, name + " got the wrong number of arguments" - if op: - args = [] - for w_arg in args_w: - try: - arg = self.unwrap_for_computation(w_arg) - except UnwrapException: - break + assert len(args_w) == oper.arity, name + " got the wrong number of arguments" + args = [] + if all(w_arg.foldable() for w_arg in args_w): + args = [w_arg.value for w_arg in args_w] + # All arguments are constants: call the operator now + try: + result = func(*args) + except Exception, e: + etype = e.__class__ + msg = "%s%r always raises %s: %s" % ( + name, tuple(args), etype, e) + raise FlowingError(self.frame, msg) + else: + # don't try to constant-fold operations giving a 'long' + # result. The result is probably meant to be sent to + # an intmask(), but the 'long' constant confuses the + # annotator a lot. + if oper.can_overflow and type(result) is long: + pass + # don't constant-fold getslice on lists, either + elif name == 'getslice' and type(result) is list: + pass + # otherwise, fine else: - args.append(arg) - else: - # All arguments are constants: call the operator now - try: - result = op(*args) - except Exception, e: - etype = e.__class__ - msg = "%s%r always raises %s: %s" % ( - name, tuple(args), etype, e) - raise FlowingError(self.frame, msg) - else: - # don't try to constant-fold operations giving a 'long' - # result. The result is probably meant to be sent to - # an intmask(), but the 'long' constant confuses the - # annotator a lot. 
- if arithmetic and type(result) is long: + try: + return self.wrap(result) + except WrapException: + # type cannot sanely appear in flow graph, + # store operation with variable result instead pass - # don't constant-fold getslice on lists, either - elif name == 'getslice' and type(result) is list: - pass - # otherwise, fine - else: - try: - return self.wrap(result) - except WrapException: - # type cannot sanely appear in flow graph, - # store operation with variable result instead - pass w_result = self.frame.do_operation_with_implicit_exceptions(name, *args_w) return w_result + return generic_operator - setattr(FlowObjSpace, name, generic_operator) - -for (name, symbol, arity, specialnames) in operation.MethodTable: - make_op(name, arity) +for oper in operation.op.__dict__.values(): + if getattr(FlowObjSpace, oper.name, None) is None: + if oper.pure: + op_method = make_op(oper) + else: + op_method = make_impure_op(oper) + setattr(FlowObjSpace, oper.name, op_method) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -8,93 +8,53 @@ import operator from rpython.tool.sourcetools import compile2 from rpython.rlib.rarithmetic import ovfcheck +from rpython.flowspace.model import Constant -# this is a copy that should be shared with standard objspace +class _OpHolder(object): pass +op = _OpHolder() -MethodTable = [ -# method name # symbol # number of arguments # special method name(s) - ('is_', 'is', 2, []), - ('id', 'id', 1, []), - ('type', 'type', 1, []), - ('isinstance', 'isinstance', 2, ['__instancecheck__']), - ('issubtype', 'issubtype', 2, ['__subclasscheck__']), # not for old-style classes - ('repr', 'repr', 1, ['__repr__']), - ('str', 'str', 1, ['__str__']), - ('format', 'format', 2, ['__format__']), - ('len', 'len', 1, ['__len__']), - ('hash', 'hash', 1, ['__hash__']), - ('getattr', 'getattr', 2, ['__getattribute__']), - ('setattr', 'setattr', 3, ['__setattr__']), - ('delattr', 'delattr', 2, ['__delattr__']), - ('getitem', 'getitem', 2, ['__getitem__']), - ('setitem', 'setitem', 3, ['__setitem__']), - ('delitem', 'delitem', 2, ['__delitem__']), - ('getslice', 'getslice', 3, ['__getslice__']), - ('setslice', 'setslice', 4, ['__setslice__']), - ('delslice', 'delslice', 3, ['__delslice__']), - ('trunc', 'trunc', 1, ['__trunc__']), - ('pos', 'pos', 1, ['__pos__']), - ('neg', 'neg', 1, ['__neg__']), - ('nonzero', 'truth', 1, ['__nonzero__']), - ('abs' , 'abs', 1, ['__abs__']), - ('hex', 'hex', 1, ['__hex__']), - ('oct', 'oct', 1, ['__oct__']), - ('ord', 'ord', 1, []), - ('invert', '~', 1, ['__invert__']), - ('add', '+', 2, ['__add__', '__radd__']), - ('sub', '-', 2, ['__sub__', '__rsub__']), - ('mul', '*', 2, ['__mul__', '__rmul__']), - ('truediv', '/', 2, ['__truediv__', '__rtruediv__']), - ('floordiv', '//', 2, ['__floordiv__', '__rfloordiv__']), - ('div', 'div', 2, ['__div__', '__rdiv__']), - ('mod', '%', 2, ['__mod__', '__rmod__']), - ('divmod', 'divmod', 2, ['__divmod__', '__rdivmod__']), - ('pow', '**', 3, ['__pow__', '__rpow__']), - ('lshift', '<<', 2, ['__lshift__', '__rlshift__']), - ('rshift', '>>', 2, ['__rshift__', '__rrshift__']), - ('and_', '&', 2, ['__and__', '__rand__']), - ('or_', '|', 2, ['__or__', '__ror__']), - ('xor', '^', 2, ['__xor__', '__rxor__']), - ('int', 'int', 1, ['__int__']), - ('index', 'index', 1, ['__index__']), - ('float', 'float', 1, ['__float__']), - ('long', 'long', 1, ['__long__']), - ('inplace_add', '+=', 2, 
['__iadd__']), - ('inplace_sub', '-=', 2, ['__isub__']), - ('inplace_mul', '*=', 2, ['__imul__']), - ('inplace_truediv', '/=', 2, ['__itruediv__']), - ('inplace_floordiv','//=', 2, ['__ifloordiv__']), - ('inplace_div', 'div=', 2, ['__idiv__']), - ('inplace_mod', '%=', 2, ['__imod__']), - ('inplace_pow', '**=', 2, ['__ipow__']), - ('inplace_lshift', '<<=', 2, ['__ilshift__']), - ('inplace_rshift', '>>=', 2, ['__irshift__']), - ('inplace_and', '&=', 2, ['__iand__']), - ('inplace_or', '|=', 2, ['__ior__']), - ('inplace_xor', '^=', 2, ['__ixor__']), - ('lt', '<', 2, ['__lt__', '__gt__']), - ('le', '<=', 2, ['__le__', '__ge__']), - ('eq', '==', 2, ['__eq__', '__eq__']), - ('ne', '!=', 2, ['__ne__', '__ne__']), - ('gt', '>', 2, ['__gt__', '__lt__']), - ('ge', '>=', 2, ['__ge__', '__le__']), - ('cmp', 'cmp', 2, ['__cmp__']), # rich cmps preferred - ('coerce', 'coerce', 2, ['__coerce__', '__coerce__']), - ('contains', 'contains', 2, ['__contains__']), - ('iter', 'iter', 1, ['__iter__']), - ('next', 'next', 1, ['next']), -# ('call', 'call', 3, ['__call__']), - ('get', 'get', 3, ['__get__']), - ('set', 'set', 3, ['__set__']), - ('delete', 'delete', 2, ['__delete__']), - ('userdel', 'del', 1, ['__del__']), - ('buffer', 'buffer', 1, ['__buffer__']), # see buffer.py - ] +func2op = {} +class SpaceOperator(object): + def __init__(self, name, arity, symbol, pyfunc, pure=False, + can_overflow=False): + self.name = name + self.arity = arity + self.symbol = symbol + self.pyfunc = pyfunc + self.pure = pure + self.can_overflow = can_overflow + self.canraise = [] -FunctionByName = {} # dict {"operation_name": } -OperationName = {} # dict {: "operation_name"} -Arity = {} # dict {"operation name": number of arguments} + def make_sc(self): + def sc_operator(space, args_w): + if len(args_w) != self.arity: + if self is op.pow and len(args_w) == 2: + args_w = args_w + [Constant(None)] + elif self is op.getattr and len(args_w) == 3: + return space.frame.do_operation('simple_call', Constant(getattr), *args_w) + else: + raise Exception("should call %r with exactly %d arguments" % ( + self.name, self.arity)) + # completely replace the call with the underlying + # operation and its limited implicit exceptions semantic + return getattr(space, self.name)(*args_w) + return sc_operator + + +def add_operator(name, arity, symbol, pyfunc=None, pure=False, ovf=False): + operator_func = getattr(operator, name, None) + oper = SpaceOperator(name, arity, symbol, pyfunc, pure, can_overflow=ovf) + setattr(op, name, oper) + if pyfunc is not None: + func2op[pyfunc] = oper + if operator_func: + func2op[operator_func] = oper + if pyfunc is None: + oper.pyfunc = operator_func + if ovf: + ovf_func = lambda *args: ovfcheck(oper.pyfunc(*args)) + add_operator(name + '_ovf', arity, symbol, pyfunc=ovf_func) # ____________________________________________________________ @@ -186,33 +146,6 @@ def userdel(x): x.__del__() -def neg_ovf(x): - return ovfcheck(-x) - -def abs_ovf(x): - return ovfcheck(abs(x)) - -def add_ovf(x, y): - return ovfcheck(x + y) - -def sub_ovf(x, y): - return ovfcheck(x - y) - -def mul_ovf(x, y): - return ovfcheck(x * y) - -def floordiv_ovf(x, y): - return ovfcheck(operator.floordiv(x, y)) - -def div_ovf(x, y): - return ovfcheck(operator.div(x, y)) - -def mod_ovf(x, y): - return ovfcheck(x % y) - -def lshift_ovf(x, y): - return ovfcheck(x << y) - # slicing: operator.{get,set,del}slice() don't support b=None or c=None def do_getslice(a, b, c): return a[b:c] @@ -226,101 +159,91 @@ def unsupported(*args): raise ValueError("this is 
not supported") -# ____________________________________________________________ -# The following table can list several times the same operation name, -# if multiple built-in functions correspond to it. The first one should -# be picked, though, as the best built-in for the given operation name. -# Lines ('name', operator.name) are added automatically. +add_operator('is_', 2, 'is', pure=True) +add_operator('id', 1, 'id', pyfunc=id) +add_operator('type', 1, 'type', pyfunc=new_style_type, pure=True) +add_operator('isinstance', 2, 'isinstance', pyfunc=isinstance, pure=True) +add_operator('issubtype', 2, 'issubtype', pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('repr', 1, 'repr', pyfunc=repr, pure=True) +add_operator('str', 1, 'str', pyfunc=str, pure=True) +add_operator('format', 2, 'format', pyfunc=unsupported) +add_operator('len', 1, 'len', pyfunc=len, pure=True) +add_operator('hash', 1, 'hash', pyfunc=hash) +add_operator('getattr', 2, 'getattr', pyfunc=getattr, pure=True) +add_operator('setattr', 3, 'setattr', pyfunc=setattr) +add_operator('delattr', 2, 'delattr', pyfunc=delattr) +add_operator('getitem', 2, 'getitem', pure=True) +add_operator('setitem', 3, 'setitem') +add_operator('delitem', 2, 'delitem') +add_operator('getslice', 3, 'getslice', pyfunc=do_getslice, pure=True) +add_operator('setslice', 4, 'setslice', pyfunc=do_setslice) +add_operator('delslice', 3, 'delslice', pyfunc=do_delslice) +add_operator('trunc', 1, 'trunc', pyfunc=unsupported) +add_operator('pos', 1, 'pos', pure=True) +add_operator('neg', 1, 'neg', pure=True, ovf=True) +add_operator('nonzero', 1, 'truth', pyfunc=bool, pure=True) +op.is_true = op.nonzero +add_operator('abs' , 1, 'abs', pyfunc=abs, pure=True, ovf=True) +add_operator('hex', 1, 'hex', pyfunc=hex, pure=True) +add_operator('oct', 1, 'oct', pyfunc=oct, pure=True) +add_operator('ord', 1, 'ord', pyfunc=ord, pure=True) +add_operator('invert', 1, '~', pure=True) +add_operator('add', 2, '+', pure=True, ovf=True) +add_operator('sub', 2, '-', pure=True, ovf=True) +add_operator('mul', 2, '*', pure=True, ovf=True) +add_operator('truediv', 2, '/', pure=True) +add_operator('floordiv', 2, '//', pure=True, ovf=True) +add_operator('div', 2, 'div', pure=True, ovf=True) +add_operator('mod', 2, '%', pure=True, ovf=True) +add_operator('divmod', 2, 'divmod', pyfunc=divmod, pure=True) +add_operator('pow', 3, '**', pyfunc=pow, pure=True) +add_operator('lshift', 2, '<<', pure=True, ovf=True) +add_operator('rshift', 2, '>>', pure=True) +add_operator('and_', 2, '&', pure=True) +add_operator('or_', 2, '|', pure=True) +add_operator('xor', 2, '^', pure=True) +add_operator('int', 1, 'int', pyfunc=do_int, pure=True) +add_operator('index', 1, 'index', pyfunc=do_index, pure=True) +add_operator('float', 1, 'float', pyfunc=do_float, pure=True) +add_operator('long', 1, 'long', pyfunc=do_long, pure=True) +add_operator('inplace_add', 2, '+=', pyfunc=inplace_add) +add_operator('inplace_sub', 2, '-=', pyfunc=inplace_sub) +add_operator('inplace_mul', 2, '*=', pyfunc=inplace_mul) +add_operator('inplace_truediv', 2, '/=', pyfunc=inplace_truediv) +add_operator('inplace_floordiv', 2, '//=', pyfunc=inplace_floordiv) +add_operator('inplace_div', 2, 'div=', pyfunc=inplace_div) +add_operator('inplace_mod', 2, '%=', pyfunc=inplace_mod) +add_operator('inplace_pow', 2, '**=', pyfunc=inplace_pow) +add_operator('inplace_lshift', 2, '<<=', pyfunc=inplace_lshift) +add_operator('inplace_rshift', 2, '>>=', pyfunc=inplace_rshift) +add_operator('inplace_and', 2, '&=', pyfunc=inplace_and) 
+add_operator('inplace_or', 2, '|=', pyfunc=inplace_or) +add_operator('inplace_xor', 2, '^=', pyfunc=inplace_xor) +add_operator('lt', 2, '<', pure=True) +add_operator('le', 2, '<=', pure=True) +add_operator('eq', 2, '==', pure=True) +add_operator('ne', 2, '!=', pure=True) +add_operator('gt', 2, '>', pure=True) +add_operator('ge', 2, '>=', pure=True) +add_operator('cmp', 2, 'cmp', pyfunc=cmp, pure=True) # rich cmps preferred +add_operator('coerce', 2, 'coerce', pyfunc=coerce, pure=True) +add_operator('contains', 2, 'contains', pure=True) +add_operator('iter', 1, 'iter', pyfunc=iter) +add_operator('next', 1, 'next', pyfunc=next) +#add_operator('call', 3, 'call') +add_operator('get', 3, 'get', pyfunc=get, pure=True) +add_operator('set', 3, 'set', pyfunc=set) +add_operator('delete', 2, 'delete', pyfunc=delete) +add_operator('userdel', 1, 'del', pyfunc=userdel) +add_operator('buffer', 1, 'buffer', pyfunc=buffer, pure=True) # see buffer.py -# INTERNAL ONLY, use the dicts declared at the top of the file. -Table = [ - ('id', id), - ('type', new_style_type), - ('type', type), - ('isinstance', isinstance), - ('issubtype', issubclass), - ('repr', repr), - ('str', str), - ('format', unsupported), - ('len', len), - ('hash', hash), - ('getattr', getattr), - ('setattr', setattr), - ('delattr', delattr), - ('nonzero', bool), - ('nonzero', operator.truth), - ('is_true', bool), - ('is_true', operator.truth), - ('trunc', unsupported), - ('abs' , abs), - ('hex', hex), - ('oct', oct), - ('ord', ord), - ('divmod', divmod), - ('pow', pow), - ('int', do_int), - ('index', do_index), - ('float', do_float), - ('long', do_long), - ('inplace_add', inplace_add), - ('inplace_sub', inplace_sub), - ('inplace_mul', inplace_mul), - ('inplace_truediv', inplace_truediv), - ('inplace_floordiv',inplace_floordiv), - ('inplace_div', inplace_div), - ('inplace_mod', inplace_mod), - ('inplace_pow', inplace_pow), - ('inplace_lshift', inplace_lshift), - ('inplace_rshift', inplace_rshift), - ('inplace_and', inplace_and), - ('inplace_or', inplace_or), From noreply at buildbot.pypy.org Tue Jul 16 08:38:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 16 Jul 2013 08:38:22 +0200 (CEST) Subject: [pypy-commit] cffi default: Mention Win64. Message-ID: <20130716063822.08DA61C14EB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1277:ab9e53ebcfb9 Date: 2013-07-16 08:38 +0200 http://bitbucket.org/cffi/cffi/changeset/ab9e53ebcfb9/ Log: Mention Win64. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -103,7 +103,7 @@ * ``python setup.py install`` or ``python setup_base.py install`` (should work out of the box on Linux or Windows; see below for - `MacOS 10.6`_) + `MacOS 10.6`_ or `Windows 64`_.) * or you can directly import and use ``cffi``, but if you don't compile the ``_cffi_backend`` extension module, it will fall back @@ -162,6 +162,20 @@ .. _here: http://superuser.com/questions/259278/python-2-6-1-pycrypto-2-3-pypi-package-broken-pipe-during-build +Windows 64 +++++++++++ + +Win32 works and is tested at least each official release. However, it +seems that compiling it for Win64 (explicitly *not* in Win32 mode) does +not work out of the box. According to `issue 9`_, this is because +distutils doesn't support .asm files. This can be resolved by applying +the patch from `Python issue 7546`_. + +.. _`issue 9`: https://bitbucket.org/cffi/cffi/issue/9 +.. 
_`Python issue 7546`: http://bugs.python.org/issue7546 + + + ======================================================= Examples From noreply at buildbot.pypy.org Tue Jul 16 09:02:00 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 16 Jul 2013 09:02:00 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: update contributor list Message-ID: <20130716070200.397531C14EB@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65410:73844c85b552 Date: 2013-07-11 10:05 +0200 http://bitbucket.org/pypy/pypy/changeset/73844c85b552/ Log: update contributor list diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,179 +35,242 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Amaury Forgeot d'Arc - Antonio Cuni - Samuele Pedroni - Michael Hudson - Holger Krekel - Alex Gaynor - Christian Tismer - Hakan Ardo - Benjamin Peterson - David Schneider - Eric van Riet Paap - Anders Chrigstrom - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Lukas Diekmann - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Sven Hager - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Justin Peel - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Laura Creighton - Adrien Di Mascio - Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone - John Witulski - Timo Paulssen - holger krekel - Dario Bertini - Mark Pearse - Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Ilya Osadchiy - Ronny Pfannschmidt - Adrian Kuhn - tav - Georg Brandl - Philip Jenvey - Gerald Klix - Wanja Saatkamp - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Jeff Terrace - Lukas Renggli - Guenter Jantzen - Ned Batchelder - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Artur Lisiecki - Bruno Gola - Ignas Mikalajunas - Stefano Rivera - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz - Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo 
Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick 
Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. 
Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz From noreply at buildbot.pypy.org Tue Jul 16 09:02:01 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 16 Jul 2013 09:02:01 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: updates license information and indentation fixes Message-ID: <20130716070201.6F4B61C14EB@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65411:2d713d7a15bc Date: 2013-07-11 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/2d713d7a15bc/ Log: updates license information and indentation fixes diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,242 +35,242 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Alex Gaynor - Michael Hudson - David Schneider - Holger Krekel - Christian Tismer - Hakan Ardo - Benjamin Peterson - Matti Picus - Philip Jenvey - Anders Chrigstrom - Brian Kearns - Eric van Riet Paap - Richard Emslie - Alexander Schremmer - Wim Lavrijsen - Dan Villiom Podlaski Christiansen - Manuel Jacob - Lukas Diekmann - Sven Hager - Anders Lehmann - Aurelien Campeas - Niklaus Haldimann - Ronan Lamy - Camillo Bruni - Laura Creighton - Toon Verwaest - Leonardo Santagada - Seo Sanghyeon - Justin Peel - Ronny Pfannschmidt - David Edelsohn - Anders Hammarquist - Jakub Gustak - Guido Wesdorp - Lawrence Oluyede - Bartosz Skowron - Daniel Roberts - Niko Matsakis - Adrien Di Mascio - Ludovic Aubry - Alexander Hesse - Jacob Hallen - Romain Guillebert - Jason Creighton - Alex Martelli - Michal Bendowski - Jan de Mooij - Michael Foord - Stephan Diehl - Stefan Schwarzer - Valentino Volonghi - Tomek Meka - Patrick Maupin - stian - Bob Ippolito - Bruno Gola - Jean-Paul Calderone - Timo Paulssen - Alexandre Fayolle - Simon Burton - Marius Gedminas - John Witulski - Greg Price - Dario Bertini - Mark Pearse - Simon Cross - Konstantin Lopuhin - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Paul deGrandis - Ilya Osadchiy - Adrian Kuhn - Boris Feigin - tav - Georg Brandl - Bert Freudenberg - Stian Andreassen - Stefano Rivera - Wanja Saatkamp - Gerald Klix - Mike Blume - Taavi Burns - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Preston Timmons - Jeff Terrace - David Ripton - Dusty Phillips - Lukas Renggli - Guenter Jantzen - Tobias Oberstein - Remi Meier - Ned Batchelder - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Jason Chu - Igor Trindade Oliveira - Jeremy Thurgood - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade - Gabriel Lavoie - Olivier Dormond - Jared Grubb - Karl Bartel - Brian Dorsey - Victor Stinner - Stuart Williams - Jasper Schulz - Toby Watson - Antoine Pitrou - Aaron Iles - Michael Cheng - Justas Sadzevicius - Gasper Zejn - Neil Shepperd - Mikael Schönenberg - Elmo Mäntynen - Tobias Pape - Jonathan David Riehl - Stanislaw Halik - Anders Qvist - Chirag Jadwani - Beatrice During - Alex Perry - Vincent Legoll - Alan McIntyre - Alexander Sedov - Corbin Simpson - Christopher Pope - Laurence Tratt - Guillebert Romain - Christian Tismer - Dan Stromberg - Stefano Parmesan - Christian Hudon - Alexis Daboville - Jens-Uwe Mager - Carl Meyer - Karl Ramm - Pieter Zieschang - Gabriel - Paweł Piotr Przeradowski - Andrew Dalke - Sylvain Thenault - Nathan Taylor - Vladimir Kryachko - Jacek Generowicz - Alejandro J. Cura - Jacob Oscarson - Travis Francis Athougies - Kristjan Valur Jonsson - Neil Blakey-Milner - Lutz Paelike - Lucio Torre - Lars Wassermann - Henrik Vendelbo - Dan Buch - Miguel de Val Borro - Artur Lisiecki - Sergey Kishchenko - Ignas Mikalajunas - Christoph Gerum - Martin Blais - Lene Wagner - Tomo Cocoa - Andrews Medina - roberto at goyle - William Leslie - Bobby Impollonia - timo at eistee.fritz.box - Andrew Thompson - Yusei Tahara - Roberto De Ioris - Juan Francisco Cantero Hurtado - Godefroid Chappelle - Joshua Gilbert - Dan Colish - Christopher Armstrong - Michael Hudson-Doyle - Anders Sigfridsson - Yasir Suhail - Floris Bruynooghe - Akira Li - Gustavo Niemeyer - Stephan Busemann - Anna Katrina Dominguez - Christian Muirhead - James Lan - shoma hosaka - Daniel Neuhäuser - Buck Golemon - Konrad Delong - Dinu Gherman - Chris Lambacher - coolbutuseless at gmail.com - Jim Baker - Rodrigo Araújo - Armin Ronacher - Brett Cannon - yrttyr - Zooko Wilcox-O Hearn - Tomer Chachamu - Christopher Groskopf - opassembler.py - Antony Lee - Jim Hunziker - Markus Unterwaditzer - Even Wiik Thomassen - jbs - soareschen - Flavio Percoco - Kristoffer Kleine - yasirs - Michael Chermside - Anna Ravencroft - Andrew Chambers - Julien Phalip - Dan Loewenherz + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts 
+ Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -287,27 +287,26 @@ by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: From noreply at buildbot.pypy.org Tue Jul 16 09:02:02 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 16 Jul 2013 09:02:02 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Merged in squeaky/pypy-ldflags/package-tk (pull request #159) Message-ID: <20130716070202.C7BD41C14EB@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65412:914b6102809a Date: 2013-07-11 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/914b6102809a/ Log: Merged in squeaky/pypy-ldflags/package-tk (pull request #159) package Tkinter diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,7 +3,7 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py root-pypy-dir [--nostrip] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] + package.py root-pypy-dir [--nostrip] [--without-tk] [name-of-archive] [name-of-pypy-c] [destination-for-tarball] [pypy-c-path] Usually you would do: package.py ../../.. pypy-VER-PLATFORM The output is found in the directory /tmp/usession-YOURNAME/build/. 
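    # A rough sketch (not a verbatim part of the patch) of driving the new
    # --without-tk behaviour from Python: package() and the 'withouttk'
    # keyword are as introduced in the hunks below; the repository path,
    # archive name and sys.path setup are placeholders/assumptions.
    from pypy.tool.release.package import package

    builddir = package('../../..', 'pypy-2.1-linux64',
                       rename_pypy_c='pypy',
                       nostrip=False,
                       withouttk=True)   # skip the _tkinter check and packaging
    print builddir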
@@ -46,7 +46,8 @@ os.system("chmod -R g-w %s" % basedir) def package(basedir, name='pypy-nightly', rename_pypy_c='pypy', - copy_to_dir = None, override_pypy_c = None, nostrip=False): + copy_to_dir=None, override_pypy_c=None, nostrip=False, + withouttk=False): basedir = py.path.local(basedir) if override_pypy_c is None: basename = 'pypy-c' @@ -70,6 +71,14 @@ if not sys.platform == 'win32': subprocess.check_call([str(pypy_c), '-c', 'import _curses']) subprocess.check_call([str(pypy_c), '-c', 'import syslog']) + if not withouttk: + try: + subprocess.check_call([str(pypy_c), '-c', 'import _tkinter']) + except subprocess.CalledProcessError: + print >>sys.stderr, """Building Tk bindings failed. +You can either install Tk development headers package or +add --without-tk option to skip packaging binary CFFI extension.""" + sys.exit(1) if sys.platform == 'win32' and not rename_pypy_c.lower().endswith('.exe'): rename_pypy_c += '.exe' binaries = [(pypy_c, rename_pypy_c)] @@ -183,14 +192,28 @@ print "Ready in %s" % (builddir,) return builddir # for tests + +def print_usage(): + print >>sys.stderr, __doc__ + sys.exit(1) + + if __name__ == '__main__': if len(sys.argv) == 1: - print >>sys.stderr, __doc__ - sys.exit(1) - else: - args = sys.argv[1:] - kw = {} - if args[0] == '--nostrip': + print_usage() + + args = sys.argv[1:] + kw = {} + + for i, arg in enumerate(args): + if arg == '--nostrip': kw['nostrip'] = True - args = args[1:] - package(*args, **kw) + elif arg == '--without-tk': + kw['withouttk'] = True + elif not arg.startswith('--'): + break + else: + print_usage() + + args = args[i:] + package(*args, **kw) From noreply at buildbot.pypy.org Tue Jul 16 09:29:40 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 16 Jul 2013 09:29:40 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: fix rpython-level exception Message-ID: <20130716072940.E57211C14BE@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: release-2.1.x Changeset: r65413:bff3c14f507a Date: 2013-07-11 19:34 +0200 http://bitbucket.org/pypy/pypy/changeset/bff3c14f507a/ Log: fix rpython-level exception diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -74,7 +74,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.str0_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -679,6 +679,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + class TestAbi: def test_abi_tag(self): From noreply at buildbot.pypy.org Tue Jul 16 09:29:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 16 Jul 2013 09:29:42 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Issue 1533: fix an RPython-level OverflowError for Message-ID: <20130716072942.992D21C14BE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65414:42cfbd794cb9 Date: 2013-07-11 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/42cfbd794cb9/ Log: Issue 1533: fix an RPython-level OverflowError for 
space.float_w(w_big_long_number). diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -126,10 +126,7 @@ return W_ComplexObject(w_int.intval, 0.0) def delegate_Long2Complex(space, w_long): - try: - dval = w_long.tofloat() - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + dval = w_long.tofloat(space) return W_ComplexObject(dval, 0.0) def delegate_Float2Complex(space, w_float): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -62,11 +62,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - try: - return W_FloatObject(w_longobj.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return W_FloatObject(w_longobj.tofloat(space)) # float__Float is supposed to do nothing, unless it has diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -26,8 +26,12 @@ def longval(self): return self.num.tolong() - def tofloat(self): - return self.num.tofloat() + def tofloat(self, space): + try: + return self.num.tofloat() + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("long int too large to convert to float")) def toint(self): return self.num.toint() @@ -66,7 +70,7 @@ return w_self.num def float_w(self, space): - return self.num.tofloat() + return self.tofloat(space) def int(self, space): if (type(self) is not W_LongObject and @@ -124,11 +128,7 @@ return long__Long(space, w_value) def float__Long(space, w_longobj): - try: - return space.newfloat(w_longobj.num.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return space.newfloat(w_longobj.tofloat(space)) def repr__Long(space, w_long): return space.wrap(w_long.num.repr()) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -18,6 +18,12 @@ w_obj = fromlong(42) assert space.unwrap(w_obj) == 42 + def test_overflow_error(self): + space = self.space + fromlong = lobj.W_LongObject.fromlong + w_big = fromlong(10**900) + space.raises_w(space.w_OverflowError, space.float_w, w_big) + def test_rint_variants(self): py.test.skip("XXX broken!") from rpython.rtyper.tool.rfficache import platform From noreply at buildbot.pypy.org Tue Jul 16 09:29:43 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 16 Jul 2013 09:29:43 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: fix issue 1537 with numpypy __array_interface__ Message-ID: <20130716072943.DAA181C14BE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus 
Branch: release-2.1.x Changeset: r65415:e64857a85f0e Date: 2013-07-14 20:02 +0300 http://bitbucket.org/pypy/pypy/changeset/e64857a85f0e/ Log: fix issue 1537 with numpypy __array_interface__ diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange From noreply at buildbot.pypy.org Tue Jul 16 10:23:26 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 16 Jul 2013 10:23:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: another task Message-ID: <20130716082326.4767D1C13AB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r4986:b8d267e3bfc9 Date: 2013-07-16 01:23 -0700 http://bitbucket.org/pypy/extradoc/changeset/b8d267e3bfc9/ Log: another task diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -44,6 +44,9 @@ subq %rdx, %rax ret +- ConstantFloat(0.0) should be generated with pxor %xmm, %xmm; instead of a + movabs. + OPTIMIZATIONS ------------- From noreply at buildbot.pypy.org Tue Jul 16 10:33:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 16 Jul 2013 10:33:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Done Message-ID: <20130716083326.651021C13AB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4987:4e4eab8c56a3 Date: 2013-07-16 10:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/4e4eab8c56a3/ Log: Done diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -32,18 +32,6 @@ - implement small tuples, there are a lot of places where they are hashed and compared -- implement INT_ABS in the JIT, currently jtransform generates a call to an - inlined function, which does the trivial branching. However, GCC shows that - it can be done branchless in the ASM (as we do for FLOAT_ABS, which is easier - because it has an explicit sign bit). GCC generates: - - movq %rdi, %rdx - sarq $63, %rdx - movq %rdx, %rax - xorq %rdi, %rax - subq %rdx, %rax - ret - - ConstantFloat(0.0) should be generated with pxor %xmm, %xmm; instead of a movabs. 
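The jit.txt entry removed above spells out how INT_ABS can be computed without a branch, and the changeset that follows implements it as _ll_1_int_abs(). A minimal plain-Python sketch of the same arithmetic-shift trick (LONG_BIT = 64 is an assumption standing in for rpython.rlib.rarithmetic.LONG_BIT; Python ints are unbounded, so this only mirrors the machine-word behaviour for values that fit in a signed 64-bit word):

    LONG_BIT = 64   # assumed word size; RPython takes this from rarithmetic

    def branchless_abs(x):
        # for a two's-complement word, x >> (LONG_BIT - 1) is 0 when x >= 0
        # and -1 (all bits set) when x < 0
        mask = x >> (LONG_BIT - 1)
        # mask == 0:  (x ^ 0) - 0 == x;   mask == -1:  (x ^ -1) - (-1) == ~x + 1 == -x
        return (x ^ mask) - mask

    assert branchless_abs(0) == 0
    assert branchless_abs(10) == 10
    assert branchless_abs(-10) == 10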
From noreply at buildbot.pypy.org  Tue Jul 16 10:33:34 2013
From: noreply at buildbot.pypy.org (arigo)
Date: Tue, 16 Jul 2013 10:33:34 +0200 (CEST)
Subject: [pypy-commit] pypy default: A no-branch version of int_abs()
Message-ID: <20130716083334.93AA51C13AB@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch:
Changeset: r65416:31e55d332904
Date: 2013-07-16 10:32 +0200
http://bitbucket.org/pypy/pypy/changeset/31e55d332904/

Log: A no-branch version of int_abs()

diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py
--- a/rpython/jit/codewriter/support.py
+++ b/rpython/jit/codewriter/support.py
@@ -7,6 +7,7 @@
 from rpython.rlib import rgc
 from rpython.rlib.jit import elidable, oopspec
 from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask
+from rpython.rlib.rarithmetic import LONG_BIT
 from rpython.rtyper import rlist
 from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator
 from rpython.rtyper.extregistry import ExtRegistryEntry
@@ -272,10 +273,9 @@
     return result

 def _ll_1_int_abs(x):
-    if x < 0:
-        return -x
-    else:
-        return x
+    # this version doesn't branch
+    mask = x >> (LONG_BIT - 1)
+    return (x ^ mask) - mask

 def _ll_1_cast_uint_to_float(x):
     # XXX on 32-bit platforms, this should be done using cast_longlong_to_float
diff --git a/rpython/jit/codewriter/test/test_support.py b/rpython/jit/codewriter/test/test_support.py
--- a/rpython/jit/codewriter/test/test_support.py
+++ b/rpython/jit/codewriter/test/test_support.py
@@ -1,8 +1,9 @@
-import py
+import py, sys
 from rpython.rtyper.lltypesystem import lltype
 from rpython.rtyper.annlowlevel import llstr
 from rpython.flowspace.model import Variable, Constant, SpaceOperation
 from rpython.jit.codewriter.support import decode_builtin_call, LLtypeHelpers
+from rpython.jit.codewriter.support import _ll_1_int_abs

 def newconst(x):
     return Constant(x, lltype.typeOf(x))
@@ -133,3 +134,12 @@
     py.test.raises(IndexError, func, p1, llstr("w"))
     py.test.raises(AttributeError, func, p1, llstr(None))
     py.test.raises(AttributeError, func, llstr(None), p2)
+
+def test_int_abs():
+    assert _ll_1_int_abs(0) == 0
+    assert _ll_1_int_abs(1) == 1
+    assert _ll_1_int_abs(10) == 10
+    assert _ll_1_int_abs(sys.maxint) == sys.maxint
+    assert _ll_1_int_abs(-1) == 1
+    assert _ll_1_int_abs(-10) == 10
+    assert _ll_1_int_abs(-sys.maxint) == sys.maxint

From noreply at buildbot.pypy.org  Tue Jul 16 10:45:58 2013
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Tue, 16 Jul 2013 10:45:58 +0200 (CEST)
Subject: [pypy-commit] extradoc extradoc: Remove a task that is done and a task that points to a non-existant paste.
Message-ID: <20130716084558.45FBB1C02BA@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: extradoc
Changeset: r4988:6069ab9f97ba
Date: 2013-07-16 01:45 -0700
http://bitbucket.org/pypy/extradoc/changeset/6069ab9f97ba/

Log: Remove a task that is done and a task that points to a non-existant paste.

diff --git a/planning/jit.txt b/planning/jit.txt
--- a/planning/jit.txt
+++ b/planning/jit.txt
@@ -89,8 +89,6 @@
 Extracted from some real-life Python programs, examples that don't give
 nice code at all so far:

-- let super() work with the method cache.
-
 - ((turn max(x, y)/min(x, y) into MAXSD, MINSD instructions when x and y
   are floats.)) (a mess, MAXSD/MINSD have different semantics WRT nan)

@@ -98,11 +96,6 @@
 BACKEND TASKS
 -------------

-- Look into this: http://paste.pocoo.org/show/450051/
-  commenting out the first line of f makes ~30% improvement.
This is due to - the fact of reordering locals and valuestack when jumping across incompatible - loops (for no good reason really, but it does make a lot of assembler) - LATER (maybe) TASKS ------------------- From noreply at buildbot.pypy.org Tue Jul 16 13:33:48 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 16 Jul 2013 13:33:48 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: new approach doing the work of copying over h_original in visit() Message-ID: <20130716113348.E40051C30A3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: r401:7c2e94ae8bf1 Date: 2013-07-16 13:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/7c2e94ae8bf1/ Log: new approach doing the work of copying over h_original in visit() diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -945,6 +945,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + dprintf(("acquire_locks\n")); assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); @@ -957,6 +958,7 @@ revision_t v; retry: assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); v = ACCESS_ONCE(R->h_revision); if (IS_POINTER(v)) /* "has a more recent revision" */ { @@ -989,7 +991,7 @@ static void CancelLocks(struct tx_descriptor *d) { wlog_t *item; - + dprintf(("cancel_locks\n")); if (!g2l_any_entry(&d->public_to_private)) return; @@ -1257,7 +1259,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - + dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1341,6 +1343,7 @@ d->active = 2; d->reads_size_limit_nonatomic = 0; update_reads_size_limit(d); + dprintf(("make_inevitable(%p)\n", d)); } static revision_t acquire_inev_mutex_and_mark_global_cur_time( diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -223,6 +223,7 @@ id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ id_copy->h_tid |= GCFLAG_VISITED; + assert(id_copy->h_tid & GCFLAG_OLD); /* XXX: may not always need tracing? */ if (!(id_copy->h_tid & GCFLAG_STUB)) @@ -236,6 +237,55 @@ } } +static gcptr copy_over_original(gcptr obj) +{ + assert(!(obj->h_tid & GCFLAG_VISITED)); + assert(!(obj->h_tid & GCFLAG_STUB)); + + if (obj->h_tid & GCFLAG_PUBLIC /* XXX: required? */ + && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) + && obj->h_original) { + + gcptr id_copy = (gcptr)obj->h_original; + assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ + if (!(id_copy->h_tid & GCFLAG_PUBLIC)) + assert(0); + /* return NULL; */ /* could be priv_from_protected with + where backup is stolen and its h-original + points to it. 
*/ + + assert(stmgc_size(id_copy) == stmgc_size(obj)); + /* prehash may be specific hash value for prebuilts, or 0 */ + revision_t prehash = id_copy->h_original; + assert(IMPLIES(prehash, id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + /* old_tid may have prebuilt_original flags that should not be lost */ + revision_t old_tid = id_copy->h_tid; + + memcpy(id_copy, obj, stmgc_size(obj)); + assert(!((id_copy->h_tid ^ old_tid) + & (GCFLAG_BACKUP_COPY //| GCFLAG_STUB, id_copy may be stub + | GCFLAG_PUBLIC | GCFLAG_HAS_ID + | GCFLAG_PRIVATE_FROM_PROTECTED))); + id_copy->h_original = prehash; + id_copy->h_tid = old_tid & ~GCFLAG_VISITED; /* will be visited next */ + + dprintf(("copy %p over %p\n", obj, id_copy)); + + /* for those visiting later: */ + obj->h_revision = (revision_t)id_copy; + + /* mark as not old for transactions to fix their + public_to_private. Otherwise, inevitable transactions + would think their public obj was modified (also for + other transactions, but they can abort) */ + obj->h_tid &= ~GCFLAG_OLD; + + return id_copy; + } + + return NULL; +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -248,7 +298,26 @@ assert(!(obj->h_tid & GCFLAG_STUB)); if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ + + gcptr next = copy_over_original(obj); + if (next) { + revision_t loc = (revision_t)pobj - offsetof(struct stm_object_s, + h_revision); + if ((gcptr)loc != next) + /* we don't want to set h_revision of 'next' to + 'next' itself, it was already set by + copy_over_original to a global head revision */ + *pobj = next; + obj = next; + + assert(obj->h_revision & 1); + assert(!(obj->h_tid & GCFLAG_VISITED)); + goto restart; + } + obj->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); + gcptrlist_insert(&objects_to_trace, obj); keep_original_alive(obj); @@ -272,6 +341,8 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; + assert(prev_obj->h_tid & GCFLAG_OLD); + keep_original_alive(prev_obj); assert(*pobj == prev_obj); @@ -283,14 +354,14 @@ } } - if (!(obj->h_revision & 3)) { - /* obj is neither a stub nor a most recent revision: - completely ignore obj->h_revision */ + /* if (!(obj->h_revision & 3)) { */ + /* /\* obj is neither a stub nor a most recent revision: */ + /* completely ignore obj->h_revision *\/ */ - obj = (gcptr)obj->h_revision; - assert(obj->h_tid & GCFLAG_PUBLIC); - prev_obj->h_revision = (revision_t)obj; - } + /* obj = (gcptr)obj->h_revision; */ + /* assert(obj->h_tid & GCFLAG_PUBLIC); */ + /* prev_obj->h_revision = (revision_t)obj; */ + /* } */ *pobj = obj; goto restart; } @@ -314,10 +385,20 @@ } obj->h_tid |= GCFLAG_VISITED; - B->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert2(&objects_to_trace, obj, B); + + if (B->h_tid & GCFLAG_OLD) { + B->h_tid |= GCFLAG_VISITED; + assert(!(B->h_tid & GCFLAG_STUB)); + gcptrlist_insert2(&objects_to_trace, obj, B); + } + else { + /* B was copied over its h_original */ + pobj = (gcptr *)&obj->h_revision; + obj = *pobj; + goto restart; + } if (IS_POINTER(B->h_revision)) { assert(B->h_tid & GCFLAG_PUBLIC); @@ -337,6 +418,7 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; + assert(obj->h_tid & GCFLAG_OLD); gcptrlist_insert(&objects_to_trace, obj); if (IS_POINTER(obj->h_revision)) { @@ -369,9 +451,11 @@ gcptr obj; for (; 
pobj != pend; pobj++) { obj = *pobj; + obj->h_tid &= ~GCFLAG_VISITED; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - assert(IS_POINTER(obj->h_revision)); - visit((gcptr *)&obj->h_revision); + /* assert(IS_POINTER(obj->h_revision)); */ + + visit_keep(obj); } } @@ -397,37 +481,72 @@ static void mark_all_stack_roots(void) { + int i; + gcptr *items; struct tx_descriptor *d; + struct G2L new_public_to_private; + memset(&new_public_to_private, 0, sizeof(struct G2L)); + for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); + /* some roots (^^^) can also be in this list, and + we may have a stolen priv_from_prot in here that, + when visited, resolves to its backup (or further) */ + items = d->old_objects_to_trace.items; + for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { + visit(&items[i]); + gcptrlist_insert(&objects_to_trace, items[i]); + } + /* the thread-local object */ visit(d->thread_local_obj_ref); visit(&d->old_thread_local_obj); /* the current transaction's private copies of public objects */ wlog_t *item; + + /* transactions need to have their pub_to_priv fixed. Otherwise, + they'll think their objects got outdated. Only absolutely + necessary for inevitable transactions (XXX check performance?). */ + dprintf(("start fixup (%p):\n", d)); + G2L_LOOP_FORWARD(d->public_to_private, item) { + gcptr R = item->addr; + gcptr L = item->val; + if (!(R->h_tid & GCFLAG_OLD)) { + /* R was copied over its original */ + gcptr new_R = (gcptr)R->h_original; + /* gcptrlist_insert(&objects_to_trace, new_R); */ + + g2l_insert(&new_public_to_private, new_R, L); + G2L_LOOP_DELETE(item); + + if (L && L->h_revision == (revision_t)R) { + L->h_revision = (revision_t)new_R; + dprintf((" fixup %p to %p <-> %p\n", R, new_R, L)); + } + else { + dprintf((" fixup %p to %p -> %p\n", R, new_R, L)); + } + } + } G2L_LOOP_END; + + /* reinsert to real pub_to_priv */ + G2L_LOOP_FORWARD(new_public_to_private, item) { + g2l_insert(&d->public_to_private, item->addr, item->val); + } G2L_LOOP_END; + g2l_clear(&new_public_to_private); + + /* now visit them */ G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; - /* Objects that were not visited yet must have the PUB_TO_PRIV - flag. Except if that transaction will abort anyway, then it - may be removed from a previous major collection that didn't - fix the PUB_TO_PRIV because the transaction was going to - abort anyway: - 1. minor_collect before major collect (R->L, R is outdated, abort) - 2. major collect removes flag - 3. major collect again, same thread, no time to abort - 4. 
flag still removed - */ - assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, - R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); visit_keep(R); if (L != NULL) { /* minor collection found R->L in public_to_young @@ -462,6 +581,9 @@ assert(gcptrlist_size(&d->private_from_protected) == d->num_private_from_protected_known_old); } + + if (new_public_to_private.raw_start) + g2l_delete_not_used_any_more(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) @@ -477,6 +599,8 @@ for (i = d->private_from_protected.size - 1; i >= 0; i--) { gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + /* we don't copy private / protected objects over prebuilts (yet) */ + assert(obj->h_tid & GCFLAG_OLD); if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ @@ -484,6 +608,18 @@ } } + + /* we visit old_objects_to_trace during marking and thus, they + should be up-to-date */ +#ifdef _GC_DEBUG + items = d->old_objects_to_trace.items; + for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { + gcptr obj = items[i]; + assert(obj->h_tid & GCFLAG_OLD); + assert(obj->h_tid & GCFLAG_VISITED); + } +#endif + /* If we're aborting this transaction anyway, we don't need to do * more here. */ @@ -500,15 +636,24 @@ gcptr obj = items[i]; assert(!(obj->h_tid & GCFLAG_STUB)); - /* Warning: in case the object listed is outdated and has been - replaced with a more recent revision, then it might be the - case that obj->h_revision doesn't have GCFLAG_VISITED, but - just removing it is very wrong --- we want 'd' to abort. - */ - if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + if (!(obj->h_tid & GCFLAG_OLD)) { + obj = (gcptr)obj->h_revision; + items[i] = obj; + } + else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* Warning: in case the object listed is outdated and has been + replaced with a more recent revision, then it might be the + case that obj->h_revision doesn't have GCFLAG_VISITED, but + just removing it is very wrong --- we want 'd' to abort. 
+ */ /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; + + /* backup copies will never be candidates for copy over + prebuilts, because there is always the priv-from-prot + object inbetween */ + assert(obj->h_tid & GCFLAG_OLD); } revision_t v = obj->h_revision; @@ -551,7 +696,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - + assert(item->addr->h_tid & GCFLAG_OLD); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -396,6 +396,10 @@ { long i, limit = d->num_read_objects_known_old; gcptr *items = d->list_of_read_objects.items; + + if (d->active < 0) + return; // aborts anyway + assert(d->list_of_read_objects.size >= limit); if (d->active == 2) { @@ -541,8 +545,9 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); - assert(gcptrlist_size(&d->list_of_read_objects) >= - d->num_read_objects_known_old); + assert(IMPLIES(d->active > 0, + gcptrlist_size(&d->list_of_read_objects) >= + d->num_read_objects_known_old)); assert(gcptrlist_size(&d->private_from_protected) >= d->num_private_from_protected_known_old); d->num_read_objects_known_old = diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -583,7 +583,9 @@ lib.stm_add_prebuilt_root(p1) def delegate_original(p1, p2): - assert p1.h_original == 0 + # no h_original or it is a prebuilt with a specified hash in h_original + assert (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL) + assert p1.h_tid & GCFLAG_OLD assert p2.h_original == 0 assert p1 != p2 p2.h_original = ffi.cast("revision_t", p1) diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -205,13 +205,13 @@ p2 = oalloc(HDR); make_public(p2) delegate(p1, p2) delegate_original(p1, p2) - p2.h_original = ffi.cast("revision_t", p1) lib.stm_push_root(p1) major_collect() major_collect() p1b = lib.stm_pop_root() check_not_free(p1) # id copy - check_not_free(p2) + check_free_old(p2) + assert p1b == p1 def test_new_version_kill_intermediate(): @@ -273,7 +273,6 @@ delegate(p3, p4) delegate(p4, p5) rawsetptr(p1, 0, p3) - delegate_original(p3, p1) delegate_original(p3, p2) delegate_original(p3, p4) delegate_original(p3, p5) @@ -285,11 +284,8 @@ check_free_old(p2) check_not_free(p3) # original check_free_old(p4) - check_not_free(p5) - assert rawgetptr(p1, 0) == p5 - assert follow_original(p1) == p3 - assert follow_original(p5) == p3 - + check_free_old(p5) + assert rawgetptr(p1, 0) == p3 def test_prebuilt_version_1(): p1 = lib.pseudoprebuilt(HDR, 42 + HDR) @@ -308,6 +304,23 @@ check_free_old(p2) check_not_free(p3) # XXX replace with p1 +def test_prebuilt_version_2_copy_over_prebuilt(): + p1 = lib.pseudoprebuilt_with_hash(HDR, 42 + HDR, 99) + p2 = oalloc(HDR); make_public(p2) + p3 = oalloc(HDR); make_public(p3) + delegate(p1, p2) + delegate_original(p1, p2) + delegate(p2, p3) + delegate_original(p1, p3) + # added by delegate, remove, otherwise + # major_collect will not copy over prebuilt p1: + p1.h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE + major_collect() + check_prebuilt(p1) + assert lib.stm_hash(p1) == 99 + check_free_old(p2) + check_free_old(p3) + def test_prebuilt_version_to_protected(): p1 = 
lib.pseudoprebuilt(HDR, 42 + HDR) p2 = lib.stm_write_barrier(p1) @@ -321,6 +334,24 @@ check_prebuilt(p1) check_not_free(p2) # XXX replace with p1 +def test_prebuilt_version_to_protected_copy_over_prebuilt(): + py.test.skip("""current copy-over-prebuilt-original approach + does not work with public_prebuilt->stub->protected""") + p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p2 = lib.stm_write_barrier(p1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + minor_collect() + p2 = lib.stm_read_barrier(p1) + assert p2 != p1 + minor_collect() + major_collect() + major_collect() + print classify(p2) + check_prebuilt(p1) + check_free_old(p2) + + def test_private(): p1 = nalloc(HDR) lib.stm_push_root(p1) @@ -396,8 +427,6 @@ print p2 major_collect() r.leave_in_parallel() - check_not_free(p2) - assert classify(p2) == "public" r.enter_in_parallel() perform_transaction(cb) r.leave_in_parallel() From noreply at buildbot.pypy.org Tue Jul 16 14:41:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 16 Jul 2013 14:41:01 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: update comment Message-ID: <20130716124101.8820E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: r402:a127a39f6967 Date: 2013-07-16 14:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/a127a39f6967/ Log: update comment diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -637,6 +637,7 @@ assert(!(obj->h_tid & GCFLAG_STUB)); if (!(obj->h_tid & GCFLAG_OLD)) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); obj = (gcptr)obj->h_revision; items[i] = obj; } @@ -650,9 +651,7 @@ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; - /* backup copies will never be candidates for copy over - prebuilts, because there is always the priv-from-prot - object inbetween */ + /* the backup-ptr should already be updated: */ assert(obj->h_tid & GCFLAG_OLD); } From noreply at buildbot.pypy.org Tue Jul 16 14:41:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 16 Jul 2013 14:41:02 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: uncomment jumping forward in visit() again Message-ID: <20130716124102.B66681C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: r403:7c66774a3b43 Date: 2013-07-16 14:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/7c66774a3b43/ Log: uncomment jumping forward in visit() again diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -354,14 +354,14 @@ } } - /* if (!(obj->h_revision & 3)) { */ - /* /\* obj is neither a stub nor a most recent revision: */ - /* completely ignore obj->h_revision *\/ */ + if (!(obj->h_revision & 3)) { + /* obj is neither a stub nor a most recent revision: + completely ignore obj->h_revision */ - /* obj = (gcptr)obj->h_revision; */ - /* assert(obj->h_tid & GCFLAG_PUBLIC); */ - /* prev_obj->h_revision = (revision_t)obj; */ - /* } */ + obj = (gcptr)obj->h_revision; + assert(obj->h_tid & GCFLAG_PUBLIC); + prev_obj->h_revision = (revision_t)obj; + } *pobj = obj; goto restart; } From noreply at buildbot.pypy.org Tue Jul 16 14:57:49 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 16 Jul 2013 14:57:49 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: use NURSERY_MOVED instead of ~OLD when copying an object over its original during major collections Message-ID: <20130716125749.D77321C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: 
r404:74ae9fa5621f Date: 2013-07-16 14:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/74ae9fa5621f/ Log: use NURSERY_MOVED instead of ~OLD when copying an object over its original during major collections diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -223,7 +223,7 @@ id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ id_copy->h_tid |= GCFLAG_VISITED; - assert(id_copy->h_tid & GCFLAG_OLD); + assert(!(id_copy->h_tid & GCFLAG_NURSERY_MOVED)); /* XXX: may not always need tracing? */ if (!(id_copy->h_tid & GCFLAG_STUB)) @@ -274,11 +274,11 @@ /* for those visiting later: */ obj->h_revision = (revision_t)id_copy; - /* mark as not old for transactions to fix their + /* mark as MOVED for transactions to fix their public_to_private. Otherwise, inevitable transactions would think their public obj was modified (also for other transactions, but they can abort) */ - obj->h_tid &= ~GCFLAG_OLD; + obj->h_tid |= GCFLAG_NURSERY_MOVED; return id_copy; } @@ -316,7 +316,7 @@ } obj->h_tid |= GCFLAG_VISITED; - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); gcptrlist_insert(&objects_to_trace, obj); @@ -341,7 +341,7 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; - assert(prev_obj->h_tid & GCFLAG_OLD); + assert(!(prev_obj->h_tid & GCFLAG_NURSERY_MOVED)); keep_original_alive(prev_obj); @@ -385,10 +385,10 @@ } obj->h_tid |= GCFLAG_VISITED; - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); assert(!(obj->h_tid & GCFLAG_STUB)); - if (B->h_tid & GCFLAG_OLD) { + if (!(B->h_tid & GCFLAG_NURSERY_MOVED)) { B->h_tid |= GCFLAG_VISITED; assert(!(B->h_tid & GCFLAG_STUB)); gcptrlist_insert2(&objects_to_trace, obj, B); @@ -418,7 +418,7 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); gcptrlist_insert(&objects_to_trace, obj); if (IS_POINTER(obj->h_revision)) { @@ -516,7 +516,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { gcptr R = item->addr; gcptr L = item->val; - if (!(R->h_tid & GCFLAG_OLD)) { + if (R->h_tid & GCFLAG_NURSERY_MOVED) { /* R was copied over its original */ gcptr new_R = (gcptr)R->h_original; /* gcptrlist_insert(&objects_to_trace, new_R); */ @@ -600,7 +600,7 @@ gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); /* we don't copy private / protected objects over prebuilts (yet) */ - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ @@ -615,7 +615,7 @@ items = d->old_objects_to_trace.items; for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { gcptr obj = items[i]; - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); assert(obj->h_tid & GCFLAG_VISITED); } #endif @@ -636,7 +636,7 @@ gcptr obj = items[i]; assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(obj->h_tid & GCFLAG_OLD)) { + if (obj->h_tid & GCFLAG_NURSERY_MOVED) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); obj = (gcptr)obj->h_revision; items[i] = obj; @@ -652,7 +652,7 @@ obj = (gcptr)obj->h_revision; /* the backup-ptr should already be updated: */ - assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); } revision_t v = obj->h_revision; @@ -695,7 +695,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & 
GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - assert(item->addr->h_tid & GCFLAG_OLD); + assert(!(item->addr->h_tid & GCFLAG_NURSERY_MOVED)); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ From noreply at buildbot.pypy.org Tue Jul 16 15:09:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 16 Jul 2013 15:09:01 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: rename GCFLAG_NURSERY_MOVED to GCFLAG_MOVED Message-ID: <20130716130901.70A751C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: r405:9a584030a9e6 Date: 2013-07-16 15:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/9a584030a9e6/ Log: rename GCFLAG_NURSERY_MOVED to GCFLAG_MOVED diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -302,7 +302,7 @@ } else { if (in_nursery(p)) { - assert(p->h_tid & GCFLAG_NURSERY_MOVED); + assert(p->h_tid & GCFLAG_MOVED); assert(!(p->h_revision & 1)); } return C_PUBLIC; diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -122,7 +122,7 @@ gcptr P_prev = P; P = (gcptr)v; assert((P->h_tid & GCFLAG_PUBLIC) || - (P_prev->h_tid & GCFLAG_NURSERY_MOVED)); + (P_prev->h_tid & GCFLAG_MOVED)); v = ACCESS_ONCE(P->h_revision); @@ -214,7 +214,7 @@ add_in_recent_reads_cache: /* The risks are that the following assert fails, because the flag was added just now by a parallel thread during stealing... */ - /*assert(!(P->h_tid & GCFLAG_NURSERY_MOVED));*/ + /*assert(!(P->h_tid & GCFLAG_MOVED));*/ fxcache_add(&d->recent_reads_cache, P); return P; @@ -257,7 +257,7 @@ */ if (P->h_tid & GCFLAG_PUBLIC) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) { P = (gcptr)P->h_revision; assert(P->h_tid & GCFLAG_PUBLIC); @@ -389,7 +389,7 @@ while (v = P->h_revision, IS_POINTER(v)) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) dprintf(("nursery_moved ")); if (v & 2) @@ -486,7 +486,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { assert(R->h_tid & GCFLAG_PUBLIC); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); #ifdef _GC_DEBUG wlog_t *entry; @@ -581,7 +581,7 @@ Add R into the list 'public_with_young_copy', unless W is actually an old object, in which case we need to record W. 
*/ - if (R->h_tid & GCFLAG_NURSERY_MOVED) + if (R->h_tid & GCFLAG_MOVED) { /* Bah, the object turned into this kind of stub, possibly while we were waiting for the collection_lock, because it @@ -671,8 +671,8 @@ continue; } } - else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) - == (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) + else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_MOVED)) + == (GCFLAG_PUBLIC | GCFLAG_MOVED)) { /* such an object is identical to the one it points to (stolen protected young object with h_revision pointing @@ -1084,7 +1084,7 @@ assert(!(L->h_tid & GCFLAG_VISITED)); assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(L->h_tid & GCFLAG_MOVED)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -1131,7 +1131,7 @@ assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -1226,7 +1226,7 @@ assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); P->h_tid |= GCFLAG_PUBLIC; assert(!(P->h_tid & GCFLAG_HAS_ID)); - if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_NURSERY_MOVED; + if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_MOVED; /* P becomes a public outdated object. It may create an exception documented in doc-objects.txt: a public but young object. It's still fine because it should only be seen by diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -46,7 +46,7 @@ * the list 'old_objects_to_trace'; it is set again at the next minor * collection. * - * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. + * GCFLAG_MOVED is used temporarily during minor/major collections. * * GCFLAG_STUB is set for debugging on stub objects made by stealing or * by major collections. 'p_stub->h_revision' might be a value @@ -67,7 +67,7 @@ static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_NURSERY_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; @@ -85,7 +85,7 @@ "PREBUILT_ORIGINAL", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ - "NURSERY_MOVED", \ + "MOVED", \ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -223,7 +223,7 @@ id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ id_copy->h_tid |= GCFLAG_VISITED; - assert(!(id_copy->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(id_copy->h_tid & GCFLAG_MOVED)); /* XXX: may not always need tracing? */ if (!(id_copy->h_tid & GCFLAG_STUB)) @@ -278,7 +278,7 @@ public_to_private. 
Otherwise, inevitable transactions would think their public obj was modified (also for other transactions, but they can abort) */ - obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_tid |= GCFLAG_MOVED; return id_copy; } @@ -316,7 +316,7 @@ } obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); gcptrlist_insert(&objects_to_trace, obj); @@ -341,7 +341,7 @@ obj = (gcptr)(obj->h_revision - 2); if (!(obj->h_tid & GCFLAG_PUBLIC)) { prev_obj->h_tid |= GCFLAG_VISITED; - assert(!(prev_obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(prev_obj->h_tid & GCFLAG_MOVED)); keep_original_alive(prev_obj); @@ -385,10 +385,10 @@ } obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(B->h_tid & GCFLAG_NURSERY_MOVED)) { + if (!(B->h_tid & GCFLAG_MOVED)) { B->h_tid |= GCFLAG_VISITED; assert(!(B->h_tid & GCFLAG_STUB)); gcptrlist_insert2(&objects_to_trace, obj, B); @@ -418,7 +418,7 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); gcptrlist_insert(&objects_to_trace, obj); if (IS_POINTER(obj->h_revision)) { @@ -516,7 +516,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { gcptr R = item->addr; gcptr L = item->val; - if (R->h_tid & GCFLAG_NURSERY_MOVED) { + if (R->h_tid & GCFLAG_MOVED) { /* R was copied over its original */ gcptr new_R = (gcptr)R->h_original; /* gcptrlist_insert(&objects_to_trace, new_R); */ @@ -600,7 +600,7 @@ gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); /* we don't copy private / protected objects over prebuilts (yet) */ - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ @@ -615,7 +615,7 @@ items = d->old_objects_to_trace.items; for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(obj->h_tid & GCFLAG_VISITED); } #endif @@ -636,7 +636,7 @@ gcptr obj = items[i]; assert(!(obj->h_tid & GCFLAG_STUB)); - if (obj->h_tid & GCFLAG_NURSERY_MOVED) { + if (obj->h_tid & GCFLAG_MOVED) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); obj = (gcptr)obj->h_revision; items[i] = obj; @@ -652,7 +652,7 @@ obj = (gcptr)obj->h_revision; /* the backup-ptr should already be updated: */ - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); } revision_t v = obj->h_revision; @@ -695,7 +695,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - assert(!(item->addr->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(item->addr->h_tid & GCFLAG_MOVED)); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -132,7 +132,7 @@ static inline gcptr create_old_object_copy(gcptr obj) { assert(!(obj->h_tid & GCFLAG_PUBLIC)); - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(!(obj->h_tid & GCFLAG_VISITED)); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); @@ -155,9 +155,9 @@ } else { /* it's a nursery 
object. Was it already moved? */ - if (UNLIKELY(obj->h_tid & GCFLAG_NURSERY_MOVED)) { + if (UNLIKELY(obj->h_tid & GCFLAG_MOVED)) { /* yes. Such an object can be a public object in the nursery - too (such objects are always NURSERY_MOVED). For all cases, + too (such objects are always MOVED). For all cases, we can just fix the ref. Can be stolen objects or those we already moved. */ @@ -178,7 +178,7 @@ fresh_old_copy = create_old_object_copy(obj); } - obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_tid |= GCFLAG_MOVED; obj->h_revision = (revision_t)fresh_old_copy; /* fix the original reference */ @@ -414,13 +414,13 @@ /* non-young or visited young objects are kept */ continue; } - else if (obj->h_tid & GCFLAG_NURSERY_MOVED) { + else if (obj->h_tid & GCFLAG_MOVED) { /* visited nursery objects are kept and updated */ items[i] = (gcptr)obj->h_revision; assert(!(items[i]->h_tid & GCFLAG_STUB)); continue; } - /* Sanity check: a nursery object without the NURSERY_MOVED flag + /* Sanity check: a nursery object without the MOVED flag is necessarily a private-without-backup object, or a protected object; it cannot be a public object. */ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -459,7 +459,7 @@ setup_minor_collect(d); /* first do this, which asserts that some objects are private --- - which fails if they have already been GCFLAG_NURSERY_MOVED */ + which fails if they have already been GCFLAG_MOVED */ mark_public_to_young(d); mark_young_roots(d); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -158,7 +158,7 @@ /* note that we should follow h_revision at least one more step: it is necessary if L is public but young (and then - has GCFLAG_NURSERY_MOVED), but it is fine to do it more + has GCFLAG_MOVED), but it is fine to do it more generally. */ v = ACCESS_ONCE(L->h_revision); if (IS_POINTER(v)) { @@ -191,7 +191,7 @@ } L->h_revision = (revision_t)O; - L->h_tid |= GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED; + L->h_tid |= GCFLAG_PUBLIC | GCFLAG_MOVED; /* subtle: we need to remove L from the fxcache of the target thread, otherwise its read barrier might not trigger on it. It is mostly fine because it is anyway identical to O. But diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -129,7 +129,7 @@ #define GCFLAG_BACKUP_COPY ... #define GCFLAG_PUBLIC_TO_PRIVATE ... #define GCFLAG_WRITE_BARRIER ... - #define GCFLAG_NURSERY_MOVED ... + #define GCFLAG_MOVED ... #define GCFLAG_STUB ... #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define GCFLAG_HAS_ID ... @@ -645,9 +645,9 @@ return "stub" else: # public objects usually never live in the nursery, but - # if stealing makes one, it has GCFLAG_NURSERY_MOVED. + # if stealing makes one, it has GCFLAG_MOVED. 
if lib.in_nursery(p): - assert p.h_tid & GCFLAG_NURSERY_MOVED + assert p.h_tid & GCFLAG_MOVED assert not (p.h_revision & 1) # "is a pointer" return "public" if backup: diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -396,7 +396,7 @@ assert p2 == lib.stm_read_barrier(p) assert p2 != plist[-1] # p2 is a public moved-out-of-nursery assert plist[-1].h_tid & GCFLAG_PUBLIC - assert plist[-1].h_tid & GCFLAG_NURSERY_MOVED + assert plist[-1].h_tid & GCFLAG_MOVED assert plist[-1].h_revision == int(ffi.cast("revision_t", p2)) assert classify(p2) == "public" r.set(3) From noreply at buildbot.pypy.org Tue Jul 16 15:13:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 16 Jul 2013 15:13:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip this test which was never working, with a comment Message-ID: <20130716131320.4248D1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65417:88285f530855 Date: 2013-07-16 15:12 +0200 http://bitbucket.org/pypy/pypy/changeset/88285f530855/ Log: Skip this test which was never working, with a comment diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -741,6 +741,7 @@ assert s.classdef is a.bookkeeper.getuniqueclassdef(C) def test_union_type_some_pbc(self): + py.test.skip("is there a point? f() can return self.__class__ instead") class A(object): name = "A" From noreply at buildbot.pypy.org Tue Jul 16 16:04:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 16 Jul 2013 16:04:05 +0200 (CEST) Subject: [pypy-commit] cffi default: A test, and start to work on a fix, which doesn't work so far Message-ID: <20130716140405.C881D1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1278:e67e5b69f47b Date: 2013-07-16 16:03 +0200 http://bitbucket.org/cffi/cffi/changeset/e67e5b69f47b/ Log: A test, and start to work on a fix, which doesn't work so far diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1422,10 +1422,28 @@ static int cdata_traverse(CDataObject *cd, visitproc visit, void *arg) { + /* XXX needs Py_TPFLAGS_HAVE_GC */ Py_VISIT(cd->c_type); return 0; } +static int cdataowning_traverse(CDataObject *cd, visitproc visit, void *arg) +{ + if (cd->c_type->ct_flags & CT_IS_PTR_TO_OWNED) { + Py_VISIT(((CDataObject_own_structptr *)cd)->structobj); + } + else if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_VISIT(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { + ffi_closure *closure = (ffi_closure *)cd->c_data; + PyObject *args = (PyObject *)(closure->user_data); + Py_VISIT(args); + } + return cdata_traverse(cd, visit, arg); +} + static PyObject *cdata_float(CDataObject *cd); /*forward*/ static PyObject *convert_cdata_to_enum_string(CDataObject *cd, int both) @@ -2427,7 +2445,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags */ 0, /* tp_doc */ - 0, /* tp_traverse */ + (traverseproc)cdataowning_traverse, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2771,6 +2771,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + 
pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") From noreply at buildbot.pypy.org Tue Jul 16 17:05:10 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:05:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: improved rpython bitblt removing some of the errors Message-ID: <20130716150510.E56781C0130@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r499:b992548ddcd8 Date: 2013-07-16 15:50 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b992548ddcd8/ Log: improved rpython bitblt removing some of the errors added a printing function to rpython bb as well as the minibluebookdebug image to allow printf debugging (comparing the results) added hard checks for lower bound on WordsObject and DisplayBitmap word access diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image index fa4c1e60de2e4482f2b3308297fef4b73835ef1e..4bb8ed8fd0cdcee3a381df2e4ad05a6af5337096 GIT binary patch [cut] diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -129,7 +129,6 @@ def __repr__(self): return self.as_repr_string() - @jit.elidable def as_repr_string(self): return "%r" % self @@ -878,6 +877,9 @@ self.setword(index0, word) def getword(self, n): + # if n < 0: + # import pdb; pdb.set_trace() + assert n >= 0 if self.words is not None: return self.words[n] else: @@ -1004,7 +1006,12 @@ return w_result def getword(self, n): + assert n >= 0 + # if self._realsize > n: return self._real_depth_buffer[n] + # else: + # print "Out-of-bounds access on display: %d/%d" % (n, self._realsize) + # import pdb; pdb.set_trace() def setword(self, n, word): raise NotImplementedError("subclass responsibility") diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -611,32 +611,23 @@ raise PrimitiveFailedError space = interp.space - import time - start = time.time() - print "blitting" - + s_bitblt = w_rcvr.as_bitblt_get_shadow(space) # See BlueBook p.356ff - s_bitblt = w_rcvr.as_bitblt_get_shadow(space) - s_bitblt.sync_cache() s_bitblt.clip_range() if s_bitblt.w <= 0 or s_bitblt.h <= 0: return w_rcvr # null range s_bitblt.compute_masks() s_bitblt.check_overlap() s_bitblt.calculate_offsets() - try: - s_bitblt.copy_loop() - except IndexError: - raise PrimitiveFailedError() + # print s_bitblt.as_string() + s_bitblt.copy_loop() w_dest_form = w_rcvr.fetch(space, 0) if w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() - - print "blitting finshed after %d ms" % int((time.time() - start) * 1000) return w_rcvr # try: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1129,9 +1129,9 @@ "dest_index", "source_delta", "dest_delta"] WordSize = 32 - RightMasks = [rarithmetic.r_uint(1)] + RightMasks = [rarithmetic.r_uint(0)] for i in xrange(WordSize): - RightMasks.append(rarithmetic.r_uint((2 ** (i + 2)) - 1)) + RightMasks.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) AllOnes = rarithmetic.r_uint((2 ** WordSize) - 1) def sync_cache(self): @@ -1174,14 +1174,14 @@ else: self.halftone_bits = None self.combination_rule = self.space.unwrap_int(self.fetch(3)) - self.dest_x = 
self.space.unwrap_int(self.fetch(4)) - 1 - self.dest_y = self.space.unwrap_int(self.fetch(5)) - 1 + self.dest_x = self.space.unwrap_int(self.fetch(4)) + self.dest_y = self.space.unwrap_int(self.fetch(5)) self.width = self.space.unwrap_int(self.fetch(6)) self.height = self.space.unwrap_int(self.fetch(7)) - self.source_x = self.space.unwrap_int(self.fetch(8)) - 1 - self.source_y = self.space.unwrap_int(self.fetch(9)) - 1 - self.clip_x = self.space.unwrap_int(self.fetch(10)) - 1 - self.clip_y = self.space.unwrap_int(self.fetch(11)) - 1 + self.source_x = self.space.unwrap_int(self.fetch(8)) + self.source_y = self.space.unwrap_int(self.fetch(9)) + self.clip_x = self.space.unwrap_int(self.fetch(10)) + self.clip_y = self.space.unwrap_int(self.fetch(11)) self.clip_width = self.space.unwrap_int(self.fetch(12)) self.clip_height = self.space.unwrap_int(self.fetch(13)) self.color_map = self.fetch(14) @@ -1263,27 +1263,26 @@ self.sx = self.sx + self.w - 1 self.dx = self.dx + self.w - 1 self.skew_mask = ~self.skew_mask - assert isinstance(self.mask2, rarithmetic.r_uint) self.mask1, self.mask2 = self.mask2, self.mask1 def calculate_offsets(self): self.preload = (self.source_form is not None and ( - self.skew_mask != 0 and + self.skew != 0 and self.skew <= (self.sx & (BitBltShadow.WordSize - 1)))) if self.h_dir < 0: self.preload = not self.preload - self.source_index = self.sy * self.source_raster + self.sx // BitBltShadow.WordSize - self.dest_index = self.dy * self.dest_raster + self.dx // BitBltShadow.WordSize - self.source_delta = ((self.source_raster * + self.source_index = self.sy * self.source_raster + (self.sx // BitBltShadow.WordSize) + self.dest_index = self.dy * self.dest_raster + (self.dx // BitBltShadow.WordSize) + self.source_delta = (self.source_raster * self.v_dir - - (self.n_words + (1 if self.preload else 0))) * - self.h_dir) + ((self.n_words + (1 if self.preload else 0)) * + self.h_dir)) self.dest_delta = self.dest_raster * self.v_dir - self.n_words * self.h_dir def copy_loop(self): space = self.space no_skew_mask = ~self.skew_mask - for i in xrange(self.h): + for i in xrange(1, self.h+1): if self.halftone_bits: halftone_word = self.halftone_bits[self.dy % len(self.halftone_bits)] self.dy = self.dy + self.v_dir @@ -1296,17 +1295,18 @@ else: prev_word = 0 merge_mask = self.mask1 - for word in xrange(self.n_words): + for word in xrange(1, self.n_words + 1): if self.source_form is not None: prev_word = prev_word & self.skew_mask - try: + if (self.source_index < 0 + or self.source_index >= self.source_bits.size()): + this_word = self.source_bits.getword(0) + else: this_word = self.source_bits.getword(self.source_index) - except IndexError: - this_word = self.source_bits.getword(0) skew_word = prev_word | (this_word & no_skew_mask) prev_word = this_word skew_word = (self.bit_shift(skew_word, self.skew) | - self.bit_shift(skew_word, self.skew - 16)) + self.bit_shift(skew_word, self.skew - BitBltShadow.WordSize)) merge_word = rarithmetic.r_uint(self.merge( skew_word & halftone_word, self.dest_bits.getword(self.dest_index) @@ -1326,12 +1326,17 @@ self.dest_index = self.dest_index + self.dest_delta def bit_shift(self, target, amount): - if amount > 0: + if amount > 31 or amount < -31: + return 0 + elif amount > 0: return (rarithmetic.r_uint(target) << amount) & BitBltShadow.AllOnes + elif amount == 0: + return target else: - return (rarithmetic.r_uint(target) >> -amount) & BitBltShadow.AllOnes + return (rarithmetic.r_uint(target) >> -amount) def merge(self, source_word, dest_word): + assert 
isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) if self.combination_rule == 0: return 0 elif self.combination_rule == 1: @@ -1364,9 +1369,24 @@ return ~source_word | ~dest_word elif self.combination_rule == 15: return dest_word & BitBltShadow.AllOnes + elif self.combination_rule >= 16 and self.combination_rule <= 24: + return dest_word + elif self.combination_rule == 25: + if source_word == 0: + return dest_word + else: + return source_word | (dest_word & ~source_word) + elif 26 <= self.combination_rule <= 41: + return dest_word else: raise error.PrimitiveFailedError() + def as_string(bb): + return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( + bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) + # "dest_raster", "source_raster", + # "halftone_bits", "mask1", "mask2", "skew_mask", + # "n_words", "preload" class FormShadow(AbstractCachingShadow): _attrs_ = ["w_bits", "width", "height", "depth", "offset_x", "offset_y"] @@ -1384,8 +1404,8 @@ w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: - self.offset_x = self.space.unwrap_int(w_offset._fetch(0)) - 1 - self.offset_y = self.space.unwrap_int(w_offset._fetch(1)) - 1 + self.offset_x = self.space.unwrap_int(w_offset._fetch(0)) + self.offset_y = self.space.unwrap_int(w_offset._fetch(1)) # def replace_bits(self): # w_bits = self.w_bits From noreply at buildbot.pypy.org Tue Jul 16 17:05:12 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:05:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: greened tests Message-ID: <20130716150512.0C0501C0130@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r500:8f1a3b5e4c46 Date: 2013-07-16 16:19 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8f1a3b5e4c46/ Log: greened tests diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -879,7 +879,7 @@ def getword(self, n): # if n < 0: # import pdb; pdb.set_trace() - assert n >= 0 + assert self.size() > n >= 0 if self.words is not None: return self.words[n] else: @@ -1006,7 +1006,7 @@ return w_result def getword(self, n): - assert n >= 0 + assert self.size() > n >= 0 # if self._realsize > n: return self._real_depth_buffer[n] # else: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1282,7 +1282,7 @@ def copy_loop(self): space = self.space no_skew_mask = ~self.skew_mask - for i in xrange(1, self.h+1): + for i in xrange(1, self.h + 1): if self.halftone_bits: halftone_word = self.halftone_bits[self.dy % len(self.halftone_bits)] self.dy = self.dy + self.v_dir @@ -1298,7 +1298,7 @@ for word in xrange(1, self.n_words + 1): if self.source_form is not None: prev_word = prev_word & self.skew_mask - if (self.source_index < 0 + if (self.source_index < 0 or self.source_index >= self.source_bits.size()): this_word = self.source_bits.getword(0) else: @@ -1392,6 +1392,11 @@ _attrs_ = ["w_bits", "width", "height", "depth", "offset_x", "offset_y"] def sync_cache(self): + if self.size() < 5: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise error.PrimitiveFailedError self.w_bits = self.fetch(0) if not (isinstance(self.w_bits, model.W_WordsObject) or 
isinstance(self.w_bits, model.W_DisplayBitmap)): w_self = self.w_self() diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -776,12 +776,16 @@ assert argcount == 0 raise CallCopyBitsSimulation + def sync_cache_mock(self): + raise CallCopyBitsSimulation + interp, w_frame, argument_count = mock([mock_bitblt], None) if interp.image is None: interp.image = Image() try: monkeypatch.setattr(w_frame._shadow, "_sendSelfSelector", perform_mock) + monkeypatch.setattr(shadow.BitBltShadow, "sync_cache", sync_cache_mock) with py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: From noreply at buildbot.pypy.org Tue Jul 16 17:05:13 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:05:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added first rpython bitblt test Message-ID: <20130716150513.0E9721C0130@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r501:e98733626cd9 Date: 2013-07-16 17:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e98733626cd9/ Log: added first rpython bitblt test diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py new file mode 100644 --- /dev/null +++ b/spyvm/test/test_bitblt.py @@ -0,0 +1,78 @@ +from spyvm import model, shadow, constants, interpreter, objspace + +space = objspace.ObjSpace() + +# copy from test_miniimage +def w(any): + # XXX could put this on the space? + if any is None: + return space.w_nil + if isinstance(any, str): + # assume never have strings of length 1 + if len(any) == 1: + return space.wrap_chr(any) + else: + return space.wrap_string(any) + if isinstance(any, bool): + return space.wrap_bool(any) + if isinstance(any, int): + return space.wrap_int(any) + if isinstance(any, float): + return space.wrap_float(any) + else: + raise Exception + +def make_form(bits, width, height, depth, o_x=0, o_y=0): + w_f = model.W_PointersObject(space, space.w_Array, 5) + w_f.store(space, 0, model.W_WordsObject(space, space.w_Array, len(bits))) + w_f.fetch(space, 0).words = bits + w_f.store(space, 1, w(width)) + w_f.store(space, 2, w(height)) + w_f.store(space, 3, w(depth)) + w_f.store(space, 4, model.W_PointersObject(space, space.w_Point, 2)) + w_f.fetch(space, 4).store(space, 0, w(o_x)) + w_f.fetch(space, 4).store(space, 1, w(o_y)) + return w_f + +def test_bitBlt_values(): + + w_bb = model.W_PointersObject(space, space.w_Array, 15) + w_bb.store(space, 0, make_form([], 1230, 20, 1)) + w_bb.store(space, 1, w_bb.fetch(space, 0)) + + w_bb.store(space, 2, space.w_nil) + w_bb.store(space, 3, w(7)) # combination rule + w_bb.store(space, 4, w(1)) # dest x + w_bb.store(space, 5, w(0)) # dest y + w_bb.store(space, 6, w(1220)) # width + w_bb.store(space, 7, w(15)) # height + w_bb.store(space, 8, w(0)) # source x + w_bb.store(space, 9, w(0)) # source y + w_bb.store(space, 10, w(0)) # clip x + w_bb.store(space, 11, w(0)) # clip y + w_bb.store(space, 12, w(1220)) # clip width + w_bb.store(space, 13, w(15)) # clip height + w_bb.store(space, 14, model.W_PointersObject(space, space.w_Array, 5)) # color map + + s_bb = w_bb.as_bitblt_get_shadow(space) + s_bb.clip_range() + assert not (s_bb.w <= 0 or s_bb.h <= 0) + s_bb.compute_masks() + s_bb.check_overlap() + s_bb.calculate_offsets() + + assert s_bb.dest_x == 1 + assert s_bb.dest_y == 0 + assert s_bb.sx == 1218 + assert s_bb.sy == 0 + assert s_bb.dx == 1219 
+ assert s_bb.dy == 0 + assert s_bb.w == 1219 + assert s_bb.h == 15 + assert s_bb.h_dir == -1 + assert s_bb.v_dir == 1 + assert s_bb.source_delta == 79 + assert s_bb.dest_delta == 78 + assert s_bb.skew == 31 + assert s_bb.source_index == 38 + assert s_bb.dest_index == 38 \ No newline at end of file From noreply at buildbot.pypy.org Tue Jul 16 17:08:07 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:08:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added orig files and my cache of previous targetimageloadingsmalltalk-versions to .hgignore Message-ID: <20130716150807.F34661C0130@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r502:5ed5f4b4771f Date: 2013-07-16 17:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/5ed5f4b4771f/ Log: added orig files and my cache of previous targetimageloadingsmalltalk-versions to .hgignore diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -3,8 +3,8 @@ *~ pypy-c-jit-62116-b027d4428675-linux images/Squeak* -targetimageloadingsmalltalk-c +targetimageloadingsmalltalk-*c images/package-cache versions coglinux - +*.orig From noreply at buildbot.pypy.org Tue Jul 16 17:36:39 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:36:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixing the remaining test Message-ID: <20130716153639.6D40C1C10AB@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r503:0340cec8ce22 Date: 2013-07-16 17:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0340cec8ce22/ Log: fixing the remaining test diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -72,7 +72,7 @@ w_bytes.setword(3, 42) assert w_bytes.getword(3) == 42 assert w_bytes.getword(0) == 0 - py.test.raises(IndexError, lambda: w_bytes.getword(20)) + py.test.raises(AssertionError, lambda: w_bytes.getword(20)) def test_c_word_object(): w_class = mockclass(space, 0, format=shadow.WORDS) @@ -85,7 +85,7 @@ w_bytes.setword(3, 42) assert w_bytes.getword(3) == 42 assert w_bytes.getword(0) == 0 - py.test.raises(IndexError, lambda: w_bytes.getword(20)) + py.test.raises(AssertionError, lambda: w_bytes.getword(20)) def test_method_lookup(): class mockmethod(object): From noreply at buildbot.pypy.org Tue Jul 16 17:36:40 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Tue, 16 Jul 2013 17:36:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added printing to blockContextShadows to improve the exit print on debugger-spawn in old images Message-ID: <20130716153640.8A9341C10AB@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r504:8f27fbc5ff8c Date: 2013-07-16 17:36 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8f27fbc5ff8c/ Log: added printing to blockContextShadows to improve the exit print on debugger-spawn in old images diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -366,6 +366,12 @@ def func(interp, s_frame, argcount): from spyvm.error import Exit if s_frame.w_method()._likely_methodname == 'doesNotUnderstand:': + print '' + print s_frame.print_stack() + w_message = s_frame.peek(0) + print w_message.as_repr_string() + if isinstance(w_message, model.W_PointersObject): + print w_message._vars raise Exit('Probably Debugger called...') raise PrimitiveFailedError() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ 
b/spyvm/shadow.py @@ -839,6 +839,9 @@ self.pc() + 1 ) + def method_str(self): + return '[] of %s' % self.w_method().get_identifier_string() + class MethodContextShadow(ContextPartShadow): _attrs_ = ['w_closure_or_nil', '_w_receiver', '_w_method'] From noreply at buildbot.pypy.org Wed Jul 17 10:43:38 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 10:43:38 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: add the package-tk merge Message-ID: <20130717084338.614C91C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65420:e021941db0c3 Date: 2013-07-17 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/e021941db0c3/ Log: add the package-tk merge diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + From noreply at buildbot.pypy.org Wed Jul 17 10:43:39 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 10:43:39 +0200 (CEST) Subject: [pypy-commit] pypy default: mention package-tk branch in whatsnew-2.1 Message-ID: <20130717084339.8C61A1C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65421:9ba7a3d27c85 Date: 2013-07-17 10:42 +0200 http://bitbucket.org/pypy/pypy/changeset/9ba7a3d27c85/ Log: mention package-tk branch in whatsnew-2.1 diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. 
+ From noreply at buildbot.pypy.org Wed Jul 17 10:58:26 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 10:58:26 +0200 (CEST) Subject: [pypy-commit] pypy default: import updated _cffi_backend/test/_backend_test_c.py Message-ID: <20130717085826.800B41C0130@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65422:71da175abf19 Date: 2013-07-17 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/71da175abf19/ Log: import updated _cffi_backend/test/_backend_test_c.py diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2760,6 +2760,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") From noreply at buildbot.pypy.org Wed Jul 17 10:58:27 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 10:58:27 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: import updated _cffi_backend/test/_backend_test_c.py Message-ID: <20130717085827.BED561C021A@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65423:371a517e6484 Date: 2013-07-17 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/371a517e6484/ Log: import updated _cffi_backend/test/_backend_test_c.py diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2760,6 +2760,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") From noreply at buildbot.pypy.org Wed Jul 17 11:23:18 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 17 Jul 2013 11:23:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fighting layers to make it work Message-ID: <20130717092318.38FE01C0130@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65424:8a3c2efecdca Date: 2013-07-17 11:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8a3c2efecdca/ Log: fighting layers to make it work diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -628,7 +628,7 @@ descr = op.getdescr() fail_descr = cast_instance_to_gcref(descr) # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) + fail_descr = rgc._make_sure_does_not_move(fail_descr) fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) diff --git a/rpython/jit/backend/llsupport/assembler.py 
b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -222,7 +222,7 @@ raise AssertionError(kind) gcref = cast_instance_to_gcref(value) - rgc._make_sure_does_not_move(gcref) + gcref = rgc._make_sure_does_not_move(gcref) value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -107,9 +107,12 @@ gcrefs_output_list.append(new_p) if op.is_guard() or op.getopnum() == rop.FINISH: - llref = cast_instance_to_gcref(op.getdescr()) + # the only ops with descrs that get recorded in a trace + from rpython.jit.metainterp.history import AbstractDescr + descr = op.getdescr() + llref = cast_instance_to_gcref(descr) new_llref = rgc._make_sure_does_not_move(llref) - new_d = rgc.try_cast_gcref_to_instance(llref.__class__, new_llref) + new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, new_llref) op.setdescr(new_d) gcrefs_output_list.append(new_llref) @@ -298,7 +301,7 @@ self.returns_modified_object = False self.gcheaderbuilder = gc_ll_descr.gcheaderbuilder self.HDRPTR = gc_ll_descr.HDRPTR - self.b_slowpath = [0, 0, 0, 0] + self.b_slowpath = [0, 0, 0, 0, 0] def repr_of_descr(self): raise NotImplementedError diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -368,7 +368,7 @@ descr = op.getdescr() fail_descr = cast_instance_to_gcref(descr) # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) + fail_descr = rgc._make_sure_does_not_move(fail_descr) fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -65,21 +65,25 @@ return llop.stm_get_tid(llgroup.HALFWORD, obj) def get_hdr_tid(self, addr): - return llmemory.cast_adr_to_int(addr + self.H_TID) + return llmemory.cast_adr_to_ptr(addr + self.H_TID, rffi.SIGNEDP) def get_hdr_revision(self, addr): - return llmemory.cast_adr_to_int(addr + self.H_REVISION) - + return llmemory.cast_adr_to_ptr(addr + self.H_REVISION, rffi.SIGNEDP) + def get_hdr_original(self, addr): - return llmemory.cast_adr_to_int(addr + self.H_ORIGINAL) + return llmemory.cast_adr_to_ptr(addr + self.H_ORIGINAL, rffi.SIGNEDP) - def get_original_object(self, obj): - if bool(self.get_hdr_tid(obj) & GCFLAG_PREBUILT_ORIGINAL): + def get_original_copy(self, obj): + addr = llmemory.cast_ptr_to_adr(obj) + if bool(self.get_hdr_tid(addr)[0] & GCFLAG_PREBUILT_ORIGINAL): return obj - orig = self.get_hdr_original(obj) + # + orig = self.get_hdr_original(addr)[0] if orig == 0: return obj - return llmemory.cast_int_to_adr(orig) + # + return llmemory.cast_adr_to_ptr(llmemory.cast_int_to_adr(orig), + llmemory.GCREF) def init_gc_object_immortal(self, addr, typeid16, flags=0): assert flags == 0 @@ -117,7 +121,7 @@ def can_move(self, obj): """Means the reference will stay valid, except if not seen by the GC, then it can get collected.""" - tid = self.get_hdr_tid(obj) + tid = self.get_hdr_tid(obj)[0] if bool(tid & GCFLAG_OLD): return False return True diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- 
a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -314,6 +314,13 @@ self.can_move_ptr = getfn(GCClass.can_move.im_func, [s_gc, annmodel.SomeAddress()], annmodel.SomeBool()) + if hasattr(GCClass, 'get_original_copy'): + self.get_original_copy_ptr = getfn( + GCClass.get_original_copy.im_func, + [s_gc, annmodel.SomePtr(llmemory.GCREF)], + annmodel.SomePtr(llmemory.GCREF)) + else: + self.get_original_copy_ptr = None if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( @@ -744,6 +751,16 @@ hop.genop("direct_call", [self.can_move_ptr, self.c_const_gc, v_addr], resultvar=op.result) + def gct_gc_get_original_copy(self, hop): + if self.get_original_copy_ptr is None: + raise Exception("unreachable code") + op = hop.spaceop + v_addr = hop.genop('cast_ptr_to_adr', + [op.args[0]], resulttype=llmemory.Address) + hop.genop("direct_call", [self.get_original_copy_ptr, + self.c_const_gc, v_addr], + resultvar=op.result) + def gct_shrink_array(self, hop): if self.shrink_array_ptr is None: return GCTransformer.gct_shrink_array(self, hop) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -26,6 +26,12 @@ return None # means 'not translated at all'; # in "if stm_is_enabled()" it is equivalent to False +def stm_get_original_copy(obj): + """ Returns a non-moving reference to an object (only use if obj is + already OLD!) + """ + return lltype.nullptr(llmemory.GCREF) + # ____________________________________________________________ # Annotation and specialization @@ -70,6 +76,20 @@ hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) + +class StmGCGetOriginalCopy(ExtRegistryEntry): + _about_ = stm_get_original_copy + + def compute_result_annotation(self, s_obj): + from rpython.annotator import model as annmodel + return annmodel.SomePtr(llmemory.GCREF) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.genop('gc_get_original_copy', hop.args_v, + resulttype=hop.r_result) + + def can_move(p): """Check if the GC object 'p' is at an address that can move. Must not be called with None. With non-moving GCs, it is always False. 
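A minimal sketch of the calling convention implied by this changeset, assuming only what the hunks above and below show: with the STM GC, _make_sure_does_not_move() may hand back a different, non-moving reference (the original copy), so callers must rebind and use the returned value, as the arm/regalloc.py and x86/regalloc.py hunks earlier in this changeset do. The pin_descr helper name below is an illustration only, not code from the repository.

    from rpython.rlib import rgc
    from rpython.rtyper.annlowlevel import cast_instance_to_gcref
    from rpython.rtyper.lltypesystem import lltype, rffi

    def pin_descr(descr):
        # Convert the descr instance to a GC reference, ask the GC for a
        # non-moving reference (under STM this may be the original copy,
        # i.e. a different pointer), and only then cast it to a raw integer.
        gcref = cast_instance_to_gcref(descr)
        gcref = rgc._make_sure_does_not_move(gcref)
        return rffi.cast(lltype.Signed, gcref)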
@@ -108,9 +128,7 @@ i += 1 if stm_is_enabled(): - from rpython.memory.gc.stmgc import StmGC - assert isinstance(gc, StmGC) - return gc.get_original_object() + return stm_get_original_copy(p) else: return p diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -526,6 +526,7 @@ 'gc_obtain_free_space': LLOp(), 'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), + 'gc_get_original_copy': LLOp(sideeffects=False), 'gc_thread_prepare' : LLOp(canmallocgc=True), 'gc_thread_run' : LLOp(), 'gc_thread_start' : LLOp(), diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -15,6 +15,7 @@ 'jit_record_known_class', 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', 'gc_adr_of_root_stack_top', + 'stmgc_get_original_copy', 'weakref_create', 'weakref_deref', 'stm_threadlocalref_get', 'stm_threadlocalref_set', 'stm_threadlocalref_count', 'stm_threadlocalref_addr', From noreply at buildbot.pypy.org Wed Jul 17 13:51:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Might go away again, but: attempting to give app-level code direct Message-ID: <20130717115107.C9EC31C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65425:b91fe2eeb20d Date: 2013-07-17 12:19 +0200 http://bitbucket.org/pypy/pypy/changeset/b91fe2eeb20d/ Log: Might go away again, but: attempting to give app-level code direct access to the wrap-around C-ish versions of some arithmetic operations. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,20 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'uint_floordiv': 'interp_intop.uint_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +81,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,7 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rlib.rarithmetic import intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(intmask(n + m)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,11 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 + assert intop.int_add(-2, -sys.maxint) == sys.maxint From noreply at buildbot.pypy.org Wed Jul 17 13:51:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:08 
+0200 (CEST) Subject: [pypy-commit] pypy default: int_sub Message-ID: <20130717115108.F1B6F1C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65426:0d0d42b3bf62 Date: 2013-07-17 12:20 +0200 http://bitbucket.org/pypy/pypy/changeset/0d0d42b3bf62/ Log: int_sub diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -5,3 +5,7 @@ @unwrap_spec(n=int, m=int) def int_add(space, n, m): return space.wrap(intmask(n + m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(intmask(n - m)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -9,3 +9,10 @@ assert intop.int_add(40, 2) == 42 assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 assert intop.int_add(-2, -sys.maxint) == sys.maxint + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 + assert intop.int_sub(-2, sys.maxint) == sys.maxint From noreply at buildbot.pypy.org Wed Jul 17 13:51:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:10 +0200 (CEST) Subject: [pypy-commit] pypy default: int_mul Message-ID: <20130717115110.2CC471C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65427:941cffc3ef01 Date: 2013-07-17 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/941cffc3ef01/ Log: int_mul diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -9,3 +9,7 @@ @unwrap_spec(n=int, m=int) def int_sub(space, n, m): return space.wrap(intmask(n - m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(intmask(n * m)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -3,6 +3,23 @@ class AppTestIntOp: spaceconfig = dict(usemodules=['__pypy__']) + def w_intmask(self, n): + import sys + n &= (sys.maxint*2+1) + if n > sys.maxint: + n -= 2*(sys.maxint+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxint) == sys.maxint + assert self.intmask(sys.maxint+1) == -sys.maxint-1 + assert self.intmask(-sys.maxint-2) == sys.maxint + N = 2 ** 128 + assert self.intmask(N+sys.maxint) == sys.maxint + assert self.intmask(N+sys.maxint+1) == -sys.maxint-1 + assert self.intmask(N-sys.maxint-2) == sys.maxint + def test_int_add(self): import sys from __pypy__ import intop @@ -16,3 +33,10 @@ assert intop.int_sub(40, -2) == 42 assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 assert intop.int_sub(-2, sys.maxint) == sys.maxint + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxint, -sys.maxint) == ( + self.intmask(sys.maxint ** 2)) From noreply at buildbot.pypy.org Wed Jul 17 13:51:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:11 +0200 (CEST) Subject: [pypy-commit] pypy default: int_floordiv Message-ID: <20130717115111.653381C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65428:ee9c24742973 Date: 2013-07-17 12:32 +0200 
http://bitbucket.org/pypy/pypy/changeset/ee9c24742973/ Log: int_floordiv diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -1,5 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.rarithmetic import intmask +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop @unwrap_spec(n=int, m=int) @@ -13,3 +15,7 @@ @unwrap_spec(n=int, m=int) def int_mul(space, n, m): return space.wrap(intmask(n * m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -40,3 +40,13 @@ assert intop.int_mul(40, -2) == -80 assert intop.int_mul(-sys.maxint, -sys.maxint) == ( self.intmask(sys.maxint ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint + assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint From noreply at buildbot.pypy.org Wed Jul 17 13:51:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Cannot give uint_floordiv a useful sense without an "unsigned" type Message-ID: <20130717115112.8B4061C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65429:39fb2e0a9f4b Date: 2013-07-17 12:32 +0200 http://bitbucket.org/pypy/pypy/changeset/39fb2e0a9f4b/ Log: Cannot give uint_floordiv a useful sense without an "unsigned" type diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -43,7 +43,6 @@ 'int_sub': 'interp_intop.int_sub', 'int_mul': 'interp_intop.int_mul', 'int_floordiv': 'interp_intop.int_floordiv', - 'uint_floordiv': 'interp_intop.uint_floordiv', 'int_mod': 'interp_intop.int_mod', 'int_lshift': 'interp_intop.int_lshift', 'uint_rshift': 'interp_intop.uint_rshift', From noreply at buildbot.pypy.org Wed Jul 17 13:51:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:13 +0200 (CEST) Subject: [pypy-commit] pypy default: int_mod Message-ID: <20130717115113.AFCDF1C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65430:cdd7666ee866 Date: 2013-07-17 12:34 +0200 http://bitbucket.org/pypy/pypy/changeset/cdd7666ee866/ Log: int_mod diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -19,3 +19,7 @@ @unwrap_spec(n=int, m=int) def int_floordiv(space, n, m): return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -50,3 +50,13 @@ assert intop.int_floordiv(-41, -3) == 13 assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint 
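A rough pure-Python sketch of the wrap-around behaviour that these intop helpers expose, modelled on the intmask helper defined in the tests above; it is only an illustration of the intended semantics, not part of the module itself::

    import sys

    def intmask(n):
        # keep one machine word, then re-interpret it as a signed value
        n &= (sys.maxint * 2 + 1)
        if n > sys.maxint:
            n -= 2 * (sys.maxint + 1)
        return int(n)

    # matches what the tests assert, e.g.
    # intop.int_add(sys.maxint, 1) == intmask(sys.maxint + 1) == -sys.maxint - 1
    # intop.int_mul(-sys.maxint, -sys.maxint) == intmask(sys.maxint ** 2)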
assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxint, -1) == 0 + assert intop.int_mod(sys.maxint, -1) == 0 From noreply at buildbot.pypy.org Wed Jul 17 13:51:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Use a consistent style and avoid intmask() Message-ID: <20130717115114.D59B91C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65431:c60b161838a3 Date: 2013-07-17 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c60b161838a3/ Log: Use a consistent style and avoid intmask() diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -1,20 +1,19 @@ from pypy.interpreter.gateway import unwrap_spec -from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop @unwrap_spec(n=int, m=int) def int_add(space, n, m): - return space.wrap(intmask(n + m)) + return space.wrap(llop.int_add(lltype.Signed, n, m)) @unwrap_spec(n=int, m=int) def int_sub(space, n, m): - return space.wrap(intmask(n - m)) + return space.wrap(llop.int_sub(lltype.Signed, n, m)) @unwrap_spec(n=int, m=int) def int_mul(space, n, m): - return space.wrap(intmask(n * m)) + return space.wrap(llop.int_mul(lltype.Signed, n, m)) @unwrap_spec(n=int, m=int) def int_floordiv(space, n, m): From noreply at buildbot.pypy.org Wed Jul 17 13:51:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 13:51:16 +0200 (CEST) Subject: [pypy-commit] pypy default: int_lshift, uint_rshift Message-ID: <20130717115116.01E731C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65432:3e7495dc4215 Date: 2013-07-17 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/3e7495dc4215/ Log: int_lshift, uint_rshift diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -1,6 +1,7 @@ from pypy.interpreter.gateway import unwrap_spec from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask @unwrap_spec(n=int, m=int) @@ -22,3 +23,13 @@ @unwrap_spec(n=int, m=int) def int_mod(space, n, m): return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -60,3 +60,37 @@ assert intop.int_mod(-41, -3) == -2 assert intop.int_mod(-sys.maxint, -1) == 0 assert intop.int_mod(sys.maxint, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + 
assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxint-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxint-1 + assert intop.int_lshift(sys.maxint // 3, 2) == ( + self.intmask((sys.maxint // 3) << 2)) + assert intop.int_lshift(-sys.maxint // 3, 2) == ( + self.intmask((-sys.maxint // 3) << 2)) + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) == 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxint + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 From noreply at buildbot.pypy.org Wed Jul 17 14:25:57 2013 From: noreply at buildbot.pypy.org (Ben Darnell) Date: Wed, 17 Jul 2013 14:25:57 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Add the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER flag. Message-ID: <20130717122557.7E8A81C0651@cobra.cs.uni-duesseldorf.de> Author: Ben Darnell Branch: release-2.1.x Changeset: r65433:94f8ef1a69a0 Date: 2013-07-13 10:32 -0400 http://bitbucket.org/pypy/pypy/changeset/94f8ef1a69a0/ Log: Add the SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER flag. This disables a sanity check in openssl that can cause problems when it is used in non-blocking mode and the GC causes the address of a str object to change (https://bugs.pypy.org/issue1238). diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. + libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -93,6 +93,7 @@ SSL_RECEIVED_SHUTDOWN = rffi_platform.ConstantInteger( "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") From noreply at buildbot.pypy.org Wed Jul 17 14:25:58 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Wed, 17 Jul 2013 14:25:58 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: ADD CPPFLAGS and LDFLAGS Message-ID: <20130717122558.B86231C0651@cobra.cs.uni-duesseldorf.de> Author: Pawe? 
Piotr Przeradowski Branch: release-2.1.x Changeset: r65434:32794dd704b3 Date: 2013-07-13 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/32794dd704b3/ Log: ADD CPPFLAGS and LDFLAGS (transplanted from d13af1390dae6871df33f602ed984577dc06a4de) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Wed Jul 17 14:25:59 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Wed, 17 Jul 2013 14:25:59 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: port CPython's implementation of customie_compiler, dont run split on env vars - maybe problematic, support LDSHARED, CPPFLAGS, CFLAGS andLDFLAGS Message-ID: <20130717122559.E480F1C0651@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: release-2.1.x Changeset: r65435:0ffbe5bae781 Date: 2013-07-14 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/0ffbe5bae781/ Log: port CPython's implementation of customie_compiler, dont run split on env vars - maybe problematic, support LDSHARED, CPPFLAGS, CFLAGS andLDFLAGS (transplanted from e52527f04d73593b095f56c33d5e75beaa96ebf9) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,9 +12,9 @@ import sys import os -import shlex from distutils.errors import DistutilsPlatformError +from distutils import log; log.set_verbosity(1) PREFIX = os.path.normpath(sys.prefix) @@ -66,6 +66,12 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' + global _config_vars _config_vars = g @@ -123,21 +129,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CPPFLAGS" in os.environ: - cppflags = shlex.split(os.environ["CPPFLAGS"]) - compiler.compiler.extend(cppflags) - compiler.compiler_so.extend(cppflags) - compiler.linker_so.extend(cppflags) - if "CFLAGS" in os.environ: - cflags = shlex.split(os.environ["CFLAGS"]) - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) - if "LDFLAGS" in os.environ: - ldflags = shlex.split(os.environ["LDFLAGS"]) - compiler.linker_so.extend(ldflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Wed Jul 17 14:26:01 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Wed, 17 Jul 2013 14:26:01 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: whatsnew Message-ID: <20130717122601.254661C0651@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: release-2.1.x Changeset: r65436:f0d5bf1285a3 Date: 2013-07-14 17:46 +0200 http://bitbucket.org/pypy/pypy/changeset/f0d5bf1285a3/ Log: whatsnew (transplanted from 32ef954a6c26b6d8606515af56b4ffb03f453762) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -81,3 +81,7 @@ Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch to optionally skip it. +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + From noreply at buildbot.pypy.org Wed Jul 17 14:26:02 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Wed, 17 Jul 2013 14:26:02 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: oups, didnt intend to commit set_verbosity Message-ID: <20130717122602.7A88A1C0651@cobra.cs.uni-duesseldorf.de> Author: Pawe? 
Piotr Przeradowski Branch: release-2.1.x Changeset: r65437:ec326d132dd7 Date: 2013-07-14 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ec326d132dd7/ Log: oups, didnt intend to commit set_verbosity (transplanted from 0c6eeae0316c11146f47fcf83e21e24f11378be1) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -14,7 +14,6 @@ import os from distutils.errors import DistutilsPlatformError -from distutils import log; log.set_verbosity(1) PREFIX = os.path.normpath(sys.prefix) @@ -72,7 +71,6 @@ g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' g['LDSHARED'] = g['CC'] + ' -shared' - global _config_vars _config_vars = g From noreply at buildbot.pypy.org Wed Jul 17 14:26:03 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 14:26:03 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: update whatsnew-2.1 Message-ID: <20130717122603.A23E51C0651@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65438:b607f4d7888d Date: 2013-07-17 14:22 +0200 http://bitbucket.org/pypy/pypy/changeset/b607f4d7888d/ Log: update whatsnew-2.1 diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -81,3 +81,6 @@ Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch to optionally skip it. +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. From noreply at buildbot.pypy.org Wed Jul 17 14:26:04 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 17 Jul 2013 14:26:04 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: update whatsnew Message-ID: <20130717122604.C11AA1C0651@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65439:5b8ff324b80a Date: 2013-07-17 14:24 +0200 http://bitbucket.org/pypy/pypy/changeset/5b8ff324b80a/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -84,3 +84,5 @@ .. branch: distutils-cppldflags Copy CPython's implementation of customize_compiler, dont call split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: ssl_moving_write_buffer diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,4 @@ Copy CPython's implementation of customize_compiler, dont call split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. +.. 
branch: ssl_moving_write_buffer From noreply at buildbot.pypy.org Wed Jul 17 15:21:49 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 15:21:49 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: Start a branch to manually manipulate jit parameters per function Message-ID: <20130717132149.B683A1C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65440:965c645286ae Date: 2013-07-17 15:20 +0200 http://bitbucket.org/pypy/pypy/changeset/965c645286ae/ Log: Start a branch to manually manipulate jit parameters per function diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -18,6 +18,8 @@ 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', 'Box': 'interp_resop.WrappedBox', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', + 'set_local_threshold': 'interp_jit.set_local_threshold', + 'set_local_bridge_threshold': 'interp_jit.set_local_bridge_threshold', } def setup_after_space_initialization(self): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -13,6 +13,7 @@ from pypy.interpreter.pycode import PyCode, CO_GENERATOR from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame +from pypy.interpreter.gateway import unwrap_spec from opcode import opmap PyFrame._virtualizable2_ = ['last_instr', 'pycode', @@ -51,11 +52,15 @@ greens = ['next_instr', 'is_being_profiled', 'pycode'] virtualizables = ['frame'] +def start_bridge_threshold(next_instr, is_being_profiled, bytecode): + return bytecode.bridge_init_threshold + pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, should_unroll_one_iteration = should_unroll_one_iteration, + start_bridge_threshold=start_bridge_threshold, name='pypyjit') class __extend__(PyFrame): @@ -117,6 +122,7 @@ def _initialize(self): PyCode__initialize(self) self.jit_cells = {} + self.bridge_init_threshold = 0 def _cleanup_(self): self.jit_cells = {} @@ -162,3 +168,16 @@ '''For testing. Invokes callable(...), but without letting the JIT follow the call.''' return space.call_args(w_callable, __args__) + + at unwrap_spec(w_code=PyCode, pos=int, value=int) +def set_local_threshold(space, w_code, pos, value): + """ set_local_threshold(code, pos, value) + + For testing. Set the threshold for this code object at position pos + at value given. 
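A minimal usage sketch of these hooks at application level, assuming a PyPy interpreter translated with this branch; both calls are meant for testing, and the bytecode position 0 below is purely illustrative (in practice it would be the offset of the loop's merge point)::

    import pypyjit

    def f(n):
        i = 0
        while i < n:
            i += 1
        return i

    # lower the compile threshold for this particular code object, and the
    # initial threshold used for its bridges
    pypyjit.set_local_threshold(f.func_code, 0, 10)
    pypyjit.set_local_bridge_threshold(f.func_code, 5)
    f(10000)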
+ """ + w_code.jit_cells[pos << 1] = value # we ignore the profiling case + + at unwrap_spec(w_code=PyCode, value=int) +def set_local_bridge_threshold(space, w_code, value): + w_code.bridge_init_threshold = value diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -481,6 +481,7 @@ get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, + start_bridge_threshold=None, name='jitdriver', check_untranslated=True): if greens is not None: self.greens = greens @@ -517,6 +518,7 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated + self.start_bridge_threshold = start_bridge_threshold def _freeze_(self): return True From noreply at buildbot.pypy.org Wed Jul 17 16:39:50 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 16:39:50 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: grrr Message-ID: <20130717143950.67D591C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65441:fb34a38a0ec4 Date: 2013-07-17 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/fb34a38a0ec4/ Log: grrr diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -176,7 +176,7 @@ For testing. Set the threshold for this code object at position pos at value given. """ - w_code.jit_cells[pos << 1] = value # we ignore the profiling case + w_code.jit_cells[pos << 1] = r_uint(value) # we ignore the profiling case @unwrap_spec(w_code=PyCode, value=int) def set_local_bridge_threshold(space, w_code, value): From noreply at buildbot.pypy.org Wed Jul 17 17:04:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 17:04:30 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: allow a different number of args Message-ID: <20130717150430.DFD761C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65442:25fdf2bb3fc7 Date: 2013-07-17 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/25fdf2bb3fc7/ Log: allow a different number of args diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2267,38 +2267,39 @@ assert s.data.tid == value def test_cond_call(self): - called = [] - - def func_void(arg): - called.append(arg) - - FUNC = self.FuncType([lltype.Signed], lltype.Void) - func_ptr = llhelper(lltype.Ptr(FUNC), func_void) - calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, - EffectInfo.MOST_GENERAL) - - ops = ''' - [i0, i1, i2, i3, i4, i5, i6, f0, f1] - cond_call(i1, ConstClass(func_ptr), i2, descr=calldescr) - guard_false(i0, descr=faildescr) [i1, i2, i3, i4, i5, i6, f0, f1] - ''' - loop = parse(ops, namespace={'faildescr': BasicFailDescr(), - 'func_ptr': func_ptr, - 'calldescr': calldescr}) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, 1.2, 3.4) - assert not called + def func_void(*args): + called.append(args) + for i in range(5): - assert self.cpu.get_int_value(frame, i) == i - assert self.cpu.get_float_value(frame, 6) == 1.2 - assert self.cpu.get_float_value(frame, 7) == 3.4 - frame 
= self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) - assert called == [1] - for i in range(4): - assert self.cpu.get_int_value(frame, i + 1) == i + 1 - assert self.cpu.get_float_value(frame, 6) == 1.2 - assert self.cpu.get_float_value(frame, 7) == 3.4 + called = [] + + FUNC = self.FuncType([lltype.Signed] * i, lltype.Void) + func_ptr = llhelper(lltype.Ptr(FUNC), func_void) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, f0, f1] + cond_call(i1, ConstClass(func_ptr), %s, descr=calldescr) + guard_false(i0, descr=faildescr) [i1, i2, i3, i4, i5, i6, f0, f1] + ''' % ', '.join(['i%d' % (j + 2) for j in range(i)]) + loop = parse(ops, namespace={'faildescr': BasicFailDescr(), + 'func_ptr': func_ptr, + 'calldescr': calldescr}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, 1.2, 3.4) + assert not called + for j in range(5): + assert self.cpu.get_int_value(frame, j) == j + assert self.cpu.get_float_value(frame, 6) == 1.2 + assert self.cpu.get_float_value(frame, 7) == 3.4 + frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) + assert called == [tuple(range(1, i + 1))] + for j in range(4): + assert self.cpu.get_int_value(frame, j + 1) == j + 1 + assert self.cpu.get_float_value(frame, 6) == 1.2 + assert self.cpu.get_float_value(frame, 7) == 3.4 def test_force_operations_returning_void(self): values = [] diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -56,6 +56,7 @@ no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] + register_arguments = [edi, esi, edx, ecx] class X86XMMRegisterManager(RegisterManager): @@ -800,9 +801,13 @@ def consider_cond_call(self, op): assert op.result is None args = op.getarglist() - assert len(args) == 1 + 2 - self.make_sure_var_in_reg(args[2], selected_reg=edi) + assert 2 <= len(args) <= 4 + 2 loc_call = self.make_sure_var_in_reg(args[1], args, selected_reg=eax) + args_so_far = [args[1]] + for i in range(2, len(args)): + reg = self.rm.register_arguments[i - 2] + self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) + args_so_far.append(args[i]) loc_cond = self.make_sure_var_in_reg(args[0], args) self.assembler.cond_call(op, self.get_gcmap(), loc_cond, loc_call, [edi]) From noreply at buildbot.pypy.org Wed Jul 17 17:25:29 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 17:25:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: start london announcement Message-ID: <20130717152529.7972A1C0651@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4989:eb280dd24330 Date: 2013-07-17 17:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/eb280dd24330/ Log: start london announcement diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/london-2013/announcement.txt @@ -0,0 +1,64 @@ +===================================================================== + PyPy London Sprint (August 26 - September 1 2013) +===================================================================== + +The next PyPy sprint will be in London, United Kingdom for the first +time. 
This is a fully public sprint: newcomers and topics other than +those proposed below are welcome. + + +------------------------------ +Goals and topics of the sprint +------------------------------ + +* whatever attendands find interesting :-) + +* refactoring the JIT optimizations + +* STM and STM-related topics + + +----------- +Exact times +----------- + +The work days should be August 26 - September 1 2013 (Monday-Sunday). +The official plans are for people to arrive on the 26th, and +to leave on the 2nd. There will be a break day in the middle. +We'll typically start at 10:00 in the morning. + + +------------ +Location +------------ + +The sprint will happen within a room of `King's College's`_ `Strand +Campus`_ in Central London, UK. + +.. _`King's College`: http://www.kcl.ac.uk/ +.. _`Strand Campus`: http://goo.gl/maps/Qz0zz + +------------ +Demo Morning +------------ + +If you don't want to come to the full sprint, but still want to chat a +bit, we are planning to have a demo morning on Tuesday August 27. We +will announce this separately on the blog. If you are interested, please +leave a comment. + +-------------- +Registration +-------------- + +If you want to attend, please register by adding yourself to the +"people.txt" file in Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/london-2013 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +Remember that you may need a UK-to-(insert country here) power adapter. diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/london-2013/people.txt @@ -0,0 +1,56 @@ + +People coming to the London sprint 2013 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available from them. + + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Carl Friedrich Bolz ? ? +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Antonio Cuni ? ? +Michael Foord ? ? +Maciej Fijalkowski ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Th�nault ? ? 
+==================== ============== ===================== From noreply at buildbot.pypy.org Wed Jul 17 17:52:35 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 17:52:35 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: gcmap support Message-ID: <20130717155235.0D9B41C2FF2@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65443:0b84ace95452 Date: 2013-07-17 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/0b84ace95452/ Log: gcmap support diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -690,6 +690,36 @@ item = rffi.cast(lltype.Ptr(S), frame.jf_frame[gcmap[0]]) assert item == new_items[2] + def test_shadowstack_cond_call(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + def check(i, frame): + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) + assert frame.jf_gcmap[0] # is not empty is good enough + + CHECK = lltype.FuncType([lltype.Signed, llmemory.GCREF], lltype.Void) + checkptr = llhelper(lltype.Ptr(CHECK), check) + checkdescr = cpu.calldescrof(CHECK, CHECK.ARGS, CHECK.RESULT, + EffectInfo.MOST_GENERAL) + + loop = self.parse(""" + [i0, p0] + p = force_token() + cond_call(i0, ConstClass(funcptr), i0, p, descr=calldescr) + guard_true(i0, descr=faildescr) [p0] + """, namespace={ + 'faildescr': BasicFailDescr(), + 'funcptr': checkptr, + 'calldescr': checkdescr, + }) + token = JitCellToken() + cpu.compile_loop(loop.inputargs, loop.operations, token) + S = self.S + s = lltype.malloc(S) + cpu.execute_token(token, 1, s) + def test_shadowstack_collecting_call_float(self): cpu = self.cpu diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2145,12 +2145,13 @@ self.mc.TEST(cond_loc, cond_loc) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() - self.push_gcmap(self.mc, gcmap, mov=True) + self.push_gcmap(self.mc, gcmap, store=True) self.mc.CALL(imm(self.cond_call_slowpath[len(arglocs)])) + self.pop_gcmap(self.mc) # never any result value offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 - self.mc.overwrite(jmp_adr-1, chr(offset)) + self.mc.overwrite(jmp_adr-1, chr(offset)) def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned From noreply at buildbot.pypy.org Wed Jul 17 17:57:43 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 17:57:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add a very generic porting task Message-ID: <20130717155743.0C1141C2FF2@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4990:0841c271298f Date: 2013-07-17 17:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/0841c271298f/ Log: add a very generic porting task diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -11,12 +11,13 @@ Goals and topics of the sprint ------------------------------ -* whatever attendands find interesting :-) +* porting applications or libraries to run on PyPy * refactoring the JIT optimizations * STM and STM-related topics 
+* whatever attendands find interesting :-) ----------- Exact times From noreply at buildbot.pypy.org Wed Jul 17 18:04:52 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 18:04:52 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: finish cond_call for x86_64 Message-ID: <20130717160452.A4CCC1C2FF2@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65444:1c382497f8eb Date: 2013-07-17 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/1c382497f8eb/ Log: finish cond_call for x86_64 diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -106,7 +106,7 @@ kind='unicode') else: self.malloc_slowpath_unicode = None - self.cond_call_slowpath = [0, self._build_cond_call_slowpath(1)] + self.cond_call_slowpath = self._build_cond_call_slowpath() self._build_stack_check_slowpath() self._build_release_gil(gc_ll_descr.gcrootmap) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -149,18 +149,22 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) - def _build_cond_call_slowpath(self, no_args): + def _build_cond_call_slowpath(self): """ This builds a general call slowpath, for whatever call happens to come. """ mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats, callee_only=False) - assert no_args == 1 + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(mc, gcrootmap) mc.SUB(esp, imm(WORD)) # first arg is always in edi mc.CALL(eax) mc.ADD(esp, imm(WORD)) + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(mc, gcrootmap) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, callee_only=False) mc.RET() @@ -718,7 +722,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_header_shadowstack(gcrootmap) + self._call_header_shadowstack(self.mc, gcrootmap) def _call_header_with_stack_check(self): self._call_header() @@ -741,7 +745,7 @@ def _call_footer(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(gcrootmap) + self._call_footer_shadowstack(self.mc, gcrootmap) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.MOV_rs(self.cpu.CALLEE_SAVE_REGISTERS[i].value, @@ -762,23 +766,23 @@ # return rst - def _call_header_shadowstack(self, gcrootmap): - rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) - self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp - self.mc.ADD_ri(ebx.value, WORD) + def _call_header_shadowstack(self, mc, gcrootmap): + rst = self._load_shadowstack_top_in_ebx(mc, gcrootmap) + mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp + mc.ADD_ri(ebx.value, WORD) if rx86.fits_in_32bits(rst): - self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx + mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: - self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), - ebx.value) # MOV [r11], ebx + mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), + ebx.value) # MOV [r11], ebx - def _call_footer_shadowstack(self, gcrootmap): + def _call_footer_shadowstack(self, mc, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - 
self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD + mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: - self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop - self.mc.SUB_mi8((ebx.value, 0), WORD) # SUB [ebx], WORD + mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop + mc.SUB_mi8((ebx.value, 0), WORD) # SUB [ebx], WORD def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking @@ -2146,7 +2150,7 @@ self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() self.push_gcmap(self.mc, gcmap, store=True) - self.mc.CALL(imm(self.cond_call_slowpath[len(arglocs)])) + self.mc.CALL(imm(self.cond_call_slowpath)) self.pop_gcmap(self.mc) # never any result value offset = self.mc.get_relative_pos() - jmp_adr From noreply at buildbot.pypy.org Wed Jul 17 19:18:25 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 17 Jul 2013 19:18:25 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: optimization (amaury) Message-ID: <20130717171825.8B5811C02BA@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65445:4b08bbbfbd22 Date: 2013-07-17 19:42 +0300 http://bitbucket.org/pypy/pypy/changeset/4b08bbbfbd22/ Log: optimization (amaury) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -539,7 +539,7 @@ if current_guess is complex_type: return complex_type if space.isinstance_w(w_obj, space.w_float): - return interp_dtype.get_dtype_cache(space).w_float64dtype + return float_type elif space.isinstance_w(w_obj, space.w_slice): return long_dtype raise operationerrfmt(space.w_NotImplementedError, From noreply at buildbot.pypy.org Wed Jul 17 19:18:26 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 17 Jul 2013 19:18:26 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: fixes from review (amaury) Message-ID: <20130717171826.CAF8B1C02BA@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65446:416b328ff61a Date: 2013-07-17 20:06 +0300 http://bitbucket.org/pypy/pypy/changeset/416b328ff61a/ Log: fixes from review (amaury) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -33,7 +33,7 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_subtype=None): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -42,12 +42,8 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) - if w_subtype: - w_ret = space.allocate_instance(W_NDimArray, space.type(w_subtype)) - W_NDimArray.__init__(w_ret, impl) - assert isinstance(w_ret, W_NDimArray) - space.call_method(w_ret, '__array_finalize__', w_subtype) - return w_ret + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -151,7 +151,7 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - w_res = W_NDimArray.from_shape(space, shape, 
arr.get_dtype(), w_subtype=arr) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, orig_size)]).apply(space, w_res).implementation.setslice(space, arr) @@ -161,7 +161,7 @@ chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_subtype=arr) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -65,7 +65,7 @@ if length == 1: return base_iter.getitem() res = W_NDimArray.from_shape(space, [length], base.get_dtype(), - base.get_order(), w_subtype=base) + base.get_order(), w_instance=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -85,7 +85,7 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_subtype=self) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): @@ -148,7 +148,7 @@ return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), - self.get_order(), w_subtype=self) + self.get_order(), w_instance=self) if not w_res.get_size(): return w_res return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, @@ -482,7 +482,7 @@ loop.byteswap(self.implementation, self.implementation) return self else: - w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_subtype=self) + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) loop.byteswap(self.implementation, w_res.implementation) return w_res @@ -778,7 +778,7 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = match_dot_shapes(space, self, other) - w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_subtype=self) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -182,7 +182,7 @@ if out: dtype = out.get_dtype() temp = W_NDimArray.from_shape(space, temp_shape, dtype, - w_subtype=obj) + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -208,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(space, shape, dtype, w_subtype=obj) + out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -217,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_subtype=obj) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -47,7 +47,7 @@ if out is None: out = W_NDimArray.from_shape(space, shape, res_dtype, - w_subtype=lhs_for_subtype) + w_instance=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -76,7 +76,7 @@ def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(space, shape, res_dtype, w_subtype=w_obj) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) From noreply at buildbot.pypy.org Wed Jul 17 19:18:28 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 17 Jul 2013 19:18:28 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: test, fix call2 for array, non-array Message-ID: <20130717171828.1198E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: ndarray-subtype Changeset: r65447:cf08f4e9b3d8 Date: 2013-07-17 20:17 +0300 http://bitbucket.org/pypy/pypy/changeset/cf08f4e9b3d8/ Log: test, fix call2 for array, non-array diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -40,7 +40,6 @@ rhs_type = space.type(w_rhs.base) rhs_for_subtype = w_rhs.base if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): - w_lhs, w_rhs = w_rhs, w_lhs lhs_for_subtype = rhs_for_subtype # TODO handle __array_priorities__ and maybe flip the order diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -199,6 +199,9 @@ assert isinstance(c, self.SubType) c = a + b assert isinstance(c, self.NoNew) + d = range(12) + e = a - d + assert isinstance(e, self.NoNew) def test_sub_call1(self): from numpypy import array, sqrt From noreply at buildbot.pypy.org Wed Jul 17 20:59:42 
2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 20:59:42 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: In-progress: move the weakref code in its own file, and start writing Message-ID: <20130717185942.6144D1C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r406:698e0c3f3413 Date: 2013-07-17 18:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/698e0c3f3413/ Log: In-progress: move the weakref code in its own file, and start writing logic for major collections. diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -16,10 +16,10 @@ H_FILES = atomic_ops.h stmgc.h stmimpl.h \ et.h lists.h steal.h nursery.h gcpage.h \ - stmsync.h extra.h dbgmem.h fprintcolor.h + stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ - stmsync.c extra.c dbgmem.c fprintcolor.c + stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -649,6 +649,8 @@ int i; wlog_t *item; + stm_invalidate_old_weakrefs(gcp); + for (i = 1; i < GC_SMALL_REQUESTS; i++) { sweep_pages(gcp, i); } diff --git a/c4/gcpage.h b/c4/gcpage.h --- a/c4/gcpage.h +++ b/c4/gcpage.h @@ -45,7 +45,8 @@ /* These fields are in tx_public_descriptor rather than tx_descriptor. The indirection allows us to keep around the lists of pages even - after the thread finishes, until the next major collection. + after the thread finishes. Such a "zombie" tx_public_descriptor + is reused by the next thread that starts. */ #define GCPAGE_FIELDS_DECL \ /* The array 'pages_for_size' contains GC_SMALL_REQUESTS \ @@ -65,7 +66,10 @@ /* A set of all non-small objects (outside the nursery). \ We could also have a single global set, but this avoids \ locking in stmgcpage_malloc/free. 
*/ \ - struct G2L nonsmall_objects; + struct G2L nonsmall_objects; \ + \ + /* Weakref support */ \ + struct GcPtrList old_weakrefs; #define LOCAL_GCPAGES() (thread_descriptor->public_descriptor) diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -101,15 +101,6 @@ return P; } -gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) -{ - gcptr weakref = stm_allocate(size, tid); - assert(stmgc_size(weakref) == size); - WEAKREF_PTR(weakref, size) = obj; - gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref); - return weakref; -} - gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); @@ -439,27 +430,6 @@ fxcache_clear(&d->recent_reads_cache); } -static void move_young_weakrefs(struct tx_descriptor *d) -{ - while (gcptrlist_size(&d->young_weakrefs) > 0) { - gcptr weakref = gcptrlist_pop(&d->young_weakrefs); - if (!(weakref->h_tid & GCFLAG_NURSERY_MOVED)) - continue; /* the weakref itself dies */ - - weakref = (gcptr)weakref->h_revision; - size_t size = stmgc_size(weakref); - gcptr obj = WEAKREF_PTR(weakref, size); - if (!is_in_nursery(d, obj)) - continue; /* the pointer does not change */ - - if (obj->h_tid & GCFLAG_NURSERY_MOVED) - obj = obj->h_revision; - else - obj = NULL; - WEAKREF_PTR(weakref, size) = obj; - } -} - static void setup_minor_collect(struct tx_descriptor *d) { spinlock_acquire(d->public_descriptor->collection_lock, 'M'); /*minor*/ @@ -507,7 +477,7 @@ surviving young-but-outside-the-nursery objects have been flagged with GCFLAG_OLD */ - move_young_weakrefs(d); + stm_move_young_weakrefs(d); teardown_minor_collect(d); assert(!stm_has_got_any_lock(d)); diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -68,7 +68,4 @@ void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); -#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD)) - - #endif diff --git a/c4/stmgc.c b/c4/stmgc.c --- a/c4/stmgc.c +++ b/c4/stmgc.c @@ -10,5 +10,6 @@ #include "gcpage.c" #include "stmsync.c" #include "extra.c" +#include "weakref.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -36,5 +36,6 @@ #include "steal.h" #include "stmsync.h" #include "extra.h" +#include "weakref.h" #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -11,11 +11,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in "et.h lists.h steal.h nursery.h gcpage.h " - "stmsync.h extra.h dbgmem.h fprintcolor.h " + "stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in "et.c lists.c steal.c nursery.c gcpage.c " - "stmsync.c extra.c dbgmem.c fprintcolor.c".split()] + "stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -319,45 +319,3 @@ def test_collect_soon(): lib.stmgc_minor_collect_soon() nalloc(HDR) - -def test_weakref_invalidate(): - p2 = nalloc(HDR) - p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags - assert p1.h_revision == lib.get_private_rev_num() - assert lib.rawgetptr(p1, 0) == p2 - lib.stm_push_root(p1) - minor_collect() - p1 = lib.stm_pop_root() - assert lib.rawgetptr(p1, 0) == 
ffi.NULL - -def test_weakref_itself_dies(): - p2 = nalloc(HDR) - p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - minor_collect() - -def test_weakref_keep(): - p2 = nalloc(HDR) - p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags - assert p1.h_revision == lib.get_private_rev_num() - assert lib.rawgetptr(p1, 0) == p2 - lib.stm_push_root(p1) - lib.stm_push_root(p2) - minor_collect() - p2 = lib.stm_pop_root() - p1 = lib.stm_pop_root() - assert lib.rawgetptr(p1, 0) == p2 - -def test_weakref_old_keep(): - p2 = oalloc(HDR) - p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags - assert p1.h_revision == lib.get_private_rev_num() - assert lib.rawgetptr(p1, 0) == p2 - lib.stm_push_root(p1) - lib.stm_push_root(p2) - minor_collect() - p2 = lib.stm_pop_root() - p1 = lib.stm_pop_root() - assert lib.rawgetptr(p1, 0) == p2 diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py new file mode 100644 --- /dev/null +++ b/c4/test/test_weakref.py @@ -0,0 +1,120 @@ +import py +from support import * + + +class BaseTest(object): + def setup_method(self, meth): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(0) + def teardown_method(self, meth): + lib.stm_finalize() + + +class TestMinorCollection(BaseTest): + + def test_weakref_invalidate(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + minor_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == ffi.NULL + + def test_weakref_itself_dies(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + minor_collect() + + def test_weakref_keep(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + def test_weakref_old_keep(self): + p2 = oalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + +class TestMajorCollection(BaseTest): + + def test_weakref_old(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + # + lib.stm_push_root(p1) + major_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == ffi.NULL + + def test_weakref_to_prebuilt(self): + p2 = palloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + major_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + def test_weakref_update_version(self): + p2 = oalloc(HDR + WORD); make_public(p2) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = 
lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + # + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + # + lib.setlong(p2, 0, 912809218) # write barrier + assert lib.rawgetlong(p2, 0) == 0 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + assert lib.rawgetlong(p2, 0) == 0 + # + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + # + assert lib.rawgetlong(p2, 0) == 0 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2b = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + assert p2b != p2 + assert lib.rawgetlong(p2b, 0) == 912809218 diff --git a/c4/weakref.c b/c4/weakref.c new file mode 100644 --- /dev/null +++ b/c4/weakref.c @@ -0,0 +1,92 @@ +#include "stmimpl.h" + +#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD)) + + +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) +{ + gcptr weakref = stm_allocate(size, tid); + assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? */ + assert(stmgc_size(weakref) == size); + WEAKREF_PTR(weakref, size) = obj; + gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref); + return weakref; +} + + +/***** Minor collection *****/ + +static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +{ + return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); +} + +void stm_move_young_weakrefs(struct tx_descriptor *d) +{ + /* The code relies on the fact that no weakref can be an old object + weakly pointing to a young object. Indeed, weakrefs are immutable + so they cannot point to an object that was created after it. + */ + while (gcptrlist_size(&d->young_weakrefs) > 0) { + gcptr weakref = gcptrlist_pop(&d->young_weakrefs); + if (!(weakref->h_tid & GCFLAG_NURSERY_MOVED)) + continue; /* the weakref itself dies */ + + weakref = (gcptr)weakref->h_revision; + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + + if (is_in_nursery(d, pointing_to)) { + if (pointing_to->h_tid & GCFLAG_NURSERY_MOVED) { + WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision; + } + else { + WEAKREF_PTR(weakref, size) = NULL; + continue; /* no need to remember this weakref any longer */ + } + } + else { + /* # see test_weakref_to_prebuilt: it's not useful to put + # weakrefs into 'old_objects_with_weakrefs' if they point + # to a prebuilt object (they are immortal). If moreover + # the 'pointing_to' prebuilt object still has the + # GCFLAG_NO_HEAP_PTRS flag, then it's even wrong, because + # 'pointing_to' will not get the GCFLAG_VISITED during + # the next major collection. Solve this by not registering + # the weakref into 'old_objects_with_weakrefs'. + */ + } + gcptrlist_insert(&d->public_descriptor->old_weakrefs, weakref); + } +} + + +/***** Major collection *****/ + +void stm_invalidate_old_weakrefs(struct tx_public_descriptor *gcp) +{ + /* walk over list of objects that contain weakrefs. 
If the + object it references does not survive, invalidate the weakref */ + long i; + gcptr *items = gcp->old_weakrefs.items; + + for (i = gcp->old_weakrefs.size - 1; i >= 0; i--) { + gcptr weakref = items[i]; + + if (!(weakref->h_tid & GCFLAG_VISITED)) { + /* weakref itself dies */ + } + else { + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + //...; + abort(); + } + + /* remove this weakref from the list */ + items[i] = items[--gcp->old_weakrefs.size]; + } + + gcptrlist_compress(&gcp->old_weakrefs); +} diff --git a/c4/weakref.h b/c4/weakref.h new file mode 100644 --- /dev/null +++ b/c4/weakref.h @@ -0,0 +1,9 @@ +#ifndef _SRCSTM_WEAKREF_H +#define _SRCSTM_WEAKREF_H + + +void stm_move_young_weakrefs(struct tx_descriptor *); +void stm_invalidate_old_weakrefs(struct tx_public_descriptor *); + + +#endif From noreply at buildbot.pypy.org Wed Jul 17 20:59:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 17 Jul 2013 20:59:43 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: Weakrefs in major collections. Tests are a bit light here given that there Message-ID: <20130717185943.AD3C91C021A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r407:69291ba64476 Date: 2013-07-17 19:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/69291ba64476/ Log: Weakrefs in major collections. Tests are a bit light here given that there are a lot of possible corner cases. diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -222,11 +222,13 @@ if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - id_copy->h_tid |= GCFLAG_VISITED; + if (!(id_copy->h_tid & GCFLAG_VISITED)) { + id_copy->h_tid |= GCFLAG_VISITED; - /* XXX: may not always need tracing? */ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); + /* XXX: may not always need tracing? 
*/ + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); + } } else { /* prebuilt originals won't get collected anyway @@ -236,6 +238,14 @@ } } +static void visit(gcptr *pobj); + +gcptr stmgcpage_visit(gcptr obj) +{ + visit(&obj); + return obj; +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -275,10 +285,10 @@ keep_original_alive(prev_obj); assert(*pobj == prev_obj); - gcptr obj1 = obj; - visit(&obj1); /* recursion, but should be only once */ + /* recursion, but should be only once */ + obj = stmgcpage_visit(obj); assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj1) | 2; + prev_obj->h_revision = ((revision_t)obj) | 2; return; } } @@ -649,8 +659,6 @@ int i; wlog_t *item; - stm_invalidate_old_weakrefs(gcp); - for (i = 1; i < GC_SMALL_REQUESTS; i++) { sweep_pages(gcp, i); } @@ -777,9 +785,13 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); mark_all_stack_roots(); - visit_all_objects(); + do { + visit_all_objects(); + stm_visit_old_weakrefs(); + } while (gcptrlist_size(&objects_to_trace) != 0); gcptrlist_delete(&objects_to_trace); clean_up_lists_of_read_objects_and_fix_outdated_flags(); + stm_clean_old_weakrefs(); mc_total_in_use = mc_total_reserved = 0; free_all_unused_local_pages(); diff --git a/c4/gcpage.h b/c4/gcpage.h --- a/c4/gcpage.h +++ b/c4/gcpage.h @@ -84,6 +84,7 @@ void stmgcpage_add_prebuilt_root(gcptr obj); void stmgcpage_possibly_major_collect(int force); long stmgcpage_count(int quantity); +gcptr stmgcpage_visit(gcptr); extern struct GcPtrList stm_prebuilt_gcroots; diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py --- a/c4/test/test_weakref.py +++ b/c4/test/test_weakref.py @@ -115,6 +115,6 @@ major_collect() p2b = lib.stm_pop_root() p1 = lib.stm_pop_root() - assert lib.rawgetptr(p1, 0) == p2 + assert lib.rawgetptr(p1, 0) == p2b assert p2b != p2 - assert lib.rawgetlong(p2b, 0) == 912809218 + assert lib.getlong(p2b, 0) == 912809218 diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -64,29 +64,140 @@ /***** Major collection *****/ -void stm_invalidate_old_weakrefs(struct tx_public_descriptor *gcp) +static _Bool is_partially_visited(gcptr obj) { - /* walk over list of objects that contain weakrefs. If the - object it references does not survive, invalidate the weakref */ - long i; + /* Based on gcpage.c:visit(). Check the code here if we simplify + visit(). Returns True or False depending on whether we find any + version of 'obj' to be VISITED or not. 
+ */ + restart: + if (obj->h_tid & GCFLAG_VISITED) + return 1; + + if (obj->h_revision & 1) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(obj->h_tid & GCFLAG_STUB)); + return 0; + } + else if (obj->h_tid & GCFLAG_PUBLIC) { + /* h_revision is a ptr: we have a more recent version */ + if (!(obj->h_revision & 2)) { + /* go visit the more recent version */ + obj = (gcptr)obj->h_revision; + } + else { + /* it's a stub */ + assert(obj->h_tid & GCFLAG_STUB); + obj = (gcptr)(obj->h_revision - 2); + } + goto restart; + } + else { + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + gcptr B = (gcptr)obj->h_revision; + assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); + if (B->h_tid & GCFLAG_VISITED) + return 1; + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(B->h_tid & GCFLAG_STUB)); + + if (IS_POINTER(B->h_revision)) { + assert(B->h_tid & GCFLAG_PUBLIC); + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(B->h_revision & 2)); + + obj = (gcptr)B->h_revision; + goto restart; + } + } + return 0; +} + +static void visit_old_weakrefs(struct tx_public_descriptor *gcp) +{ + /* Note: it's possible that a weakref points to a public stub to a + protected object, and only the protected object was marked as + VISITED so far. In this case, this function needs to mark the + public stub as VISITED too. + */ + long i, size = gcp->old_weakrefs.size; gcptr *items = gcp->old_weakrefs.items; - for (i = gcp->old_weakrefs.size - 1; i >= 0; i--) { + for (i = 0; i < size; i++) { gcptr weakref = items[i]; + /* weakrefs are immutable: during a major collection, they + cannot be in the nursery, and so there should be only one + version of each weakref object. XXX relying on this is + a bit fragile, but simplifies things a lot... */ + assert(weakref->h_revision & 1); + if (!(weakref->h_tid & GCFLAG_VISITED)) { - /* weakref itself dies */ + /* the weakref itself dies */ } else { size_t size = stmgc_size(weakref); gcptr pointing_to = WEAKREF_PTR(weakref, size); - //...; - abort(); + assert(pointing_to != NULL); + if (is_partially_visited(pointing_to)) { + pointing_to = stmgcpage_visit(pointing_to); + assert(pointing_to->h_tid & GCFLAG_VISITED); + WEAKREF_PTR(weakref, size) = pointing_to; + } + else { + /* the weakref appears to be pointing to a dying object, + but we don't know for sure now. Clearing it is left + to clean_old_weakrefs(). */ + } } + } +} +static void clean_old_weakrefs(struct tx_public_descriptor *gcp) +{ + long i, size = gcp->old_weakrefs.size; + gcptr *items = gcp->old_weakrefs.items; + + for (i = size - 1; i >= 0; i--) { + gcptr weakref = items[i]; + assert(weakref->h_revision & 1); + if (weakref->h_tid & GCFLAG_VISITED) { + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + if (pointing_to->h_tid & GCFLAG_VISITED) { + continue; /* the target stays alive, the weakref remains */ + } + WEAKREF_PTR(weakref, size) = NULL; /* the target dies */ + } /* remove this weakref from the list */ items[i] = items[--gcp->old_weakrefs.size]; } - gcptrlist_compress(&gcp->old_weakrefs); } + +static void for_each_public_descriptor( + void visit(struct tx_public_descriptor *)) { + struct tx_descriptor *d; + for (d = stm_tx_head; d; d = d->tx_next) + visit(d->public_descriptor); + + struct tx_public_descriptor *gcp; + revision_t index = -1; + while ((gcp = stm_get_free_public_descriptor(&index)) != NULL) + visit(gcp); +} + +void stm_visit_old_weakrefs(void) +{ + /* Figure out which weakrefs survive, which possibly + adds more objects to 'objects_to_trace'. 
+ */ + for_each_public_descriptor(visit_old_weakrefs); +} + +void stm_clean_old_weakrefs(void) +{ + /* Clean up the non-surviving weakrefs + */ + for_each_public_descriptor(clean_old_weakrefs); +} diff --git a/c4/weakref.h b/c4/weakref.h --- a/c4/weakref.h +++ b/c4/weakref.h @@ -3,7 +3,8 @@ void stm_move_young_weakrefs(struct tx_descriptor *); -void stm_invalidate_old_weakrefs(struct tx_public_descriptor *); +void stm_visit_old_weakrefs(void); +void stm_clean_old_weakrefs(void); #endif From noreply at buildbot.pypy.org Wed Jul 17 22:20:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 17 Jul 2013 22:20:02 +0200 (CEST) Subject: [pypy-commit] buildbot numpy-tests: add builder to master Message-ID: <20130717202002.322D81C02BA@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpy-tests Changeset: r830:b37fcd808249 Date: 2013-07-17 23:19 +0300 http://bitbucket.org/pypy/buildbot/changeset/b37fcd808249/ Log: add builder to master diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -783,23 +783,30 @@ self.addStep(ShellCmd( description="install nose", - command=['download/bin/pip', 'install','nose'], + command=['install/bin/pip', 'install','nose'], workdir='pypy-c')) # obtain a pypy-compatible branch of numpy numpy_url = 'https://github.com/mattip/numpy' numpy_pypy_branch = 'pypy' - update_git(platform, factory, numpy_url, 'numpy_src', use_branch=True, + update_git(platform, self, numpy_url, 'numpy_src', use_branch=True, force_branch=numpy_pypy_branch) + if os.path.exists('pypy_c/download/lib_pypy/numpy.py'): + self.addStep(ShellCmd( + description="delete lib_pypy/numpy.*", + command=['rm', 'download/lib_pypy/numpy.*'], + workdir='pypy-c')) + + self.addStep(ShellCmd( description="install numpy", - command=['download/bin/python', 'setup.py','install'], + command=['install/bin/python', 'setup.py','install'], workdir='numpy_src')) self.addStep(ShellCmd( description="test numpy", - command=['download/bin/python', '-c', '"import numpy;numpy.test()"', + command=['install/bin/python', '-c', '"import numpy;numpy.test()"', '> pytest-numpy.log','2>&1'], logfiles={'pytestLog': 'pytest-numpy.log'}, timeout=4000, diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -151,6 +151,8 @@ pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', postfix='-64') +pypyNumpyCompatability = pypybuilds.NativeNumpyTests() + # LINUX32 = "own-linux-x86-32" @@ -182,7 +184,7 @@ JITBENCH64 = "jit-benchmark-linux-x86-64" JITBENCH64_2 = 'jit-benchmark-linux-x86-64-2' CPYTHON_64 = "cpython-2-benchmark-x86-64" - +NUMPY_64 = "numpy-compatability-linux-x86-64" extra_opts = {'xerxes': {'keepalive_interval': 15}, 'aurora': {'max_builds': 1}, @@ -393,6 +395,12 @@ 'factory': pypyOwnTestFactoryIndiana, 'category': 'openindiana32', }, + {'name': NUMPY_64, + 'slavenames': ['numpy64'], + 'builddir': NUMPY_64, + 'factory': pypyNumpyCompatability, + 'category': 'numpy', + }, ] + ARM.builders, # http://readthedocs.org/docs/buildbot/en/latest/tour.html#debugging-with-manhole From noreply at buildbot.pypy.org Wed Jul 17 23:04:24 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 23:04:24 +0200 (CEST) Subject: [pypy-commit] pypy default: make small str(small long) much faster by caching the powers of ten needed Message-ID: <20130717210424.3E9E31C021A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz 
Branch: Changeset: r65448:7ceb58dbdc24 Date: 2013-07-17 20:24 +0200 http://bitbucket.org/pypy/pypy/changeset/7ceb58dbdc24/ Log: make small str(small long) much faster by caching the powers of ten needed diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2052,6 +2052,23 @@ _FORMAT_MINDIGITS = 5 # 36 ** 5 fits in 32 bits, there may be a better choice for this +class _PartsCache(object): + def __init__(self): + # 36 - 3, because bases 0, 1 make no sense + # and 2 is handled differently + self.parts_cache = [None] * 33 + + def get_cached_parts(self, base): + res = self.parts_cache[base - 3] + if res is None: + rbase = rbigint.fromint(base) + part = rbase.pow(rbigint.fromint(_FORMAT_MINDIGITS)) + res = [part] + self.parts_cache[base - 3] = res + return res + +_parts_cache = _PartsCache() + def _format_int(val, digits): base = len(digits) out = [] @@ -2068,9 +2085,7 @@ if i < 0: # this checks whether any digit has been appended yet if output.getlength() == size_prefix: - if x.sign == 0: - pass - else: + if x.sign != 0: s = _format_int(x.toint(), digits) output.append(s) else: @@ -2095,22 +2110,34 @@ rbase = rbigint.fromint(base) two = rbigint.fromint(2) - pts = [rbase.pow(rbigint.fromint(_FORMAT_MINDIGITS))] + pts = _parts_cache.get_cached_parts(base) stringsize = _FORMAT_MINDIGITS - while pts[-1].lt(x): - pts.append(pts[-1].pow(two)) - stringsize *= 2 - pts.pop() # remove first base**2**i greater than x + startindex = 0 + for startindex, part in enumerate(pts): + if not part.lt(x): + break + stringsize *= 2 # XXX can this overflow on 32 bit? + else: + # not enough parts computed yet + while pts[-1].lt(x): + pts.append(pts[-1].pow(two)) + stringsize *= 2 + + startindex = len(pts) - 1 + + # remove first base**2**i greater than x + startindex -= 1 output = StringBuilder(stringsize) if negative: output.append('-') output.append(prefix) - _format_recursive(x, len(pts)-1, output, pts, digits, output.getlength()) + _format_recursive(x, startindex, output, pts, digits, output.getlength()) output.append(suffix) return output.build() + def _bitwise(a, op, b): # '&', '|', '^' """ Bitwise and/or/xor operations """ diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -515,7 +515,19 @@ assert x.format('.!') == ( '-!....!!..!!..!.!!.!......!...!...!!!........!') assert x.format('abcdefghijkl', '<<', '>>') == '-<>' - + + def test_format_caching(self): + big = rbigint.fromlong(2 ** 1000) + rbigint.pow = None + res1 = big.str() + oldpow = rbigint.__dict__['pow'] + # make sure pow is not used the second time + try: + res2 = big.str() + assert res2 == res1 + finally: + rbigint.pow = oldpow + def test_overzelous_assertion(self): a = rbigint.fromlong(-1<<10000) b = rbigint.fromlong(-1<<3000) From noreply at buildbot.pypy.org Wed Jul 17 23:04:25 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 23:04:25 +0200 (CEST) Subject: [pypy-commit] pypy default: use str if fitting into ints Message-ID: <20130717210425.6BB371C021A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65449:53e2a91724cf Date: 2013-07-17 20:45 +0200 http://bitbucket.org/pypy/pypy/changeset/53e2a91724cf/ Log: use str if fitting into ints diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -454,11 +454,19 @@ @jit.elidable def repr(self): - return 
self.format(BASE10, suffix="L") + try: + x = self.toint() + except OverflowError: + return self.format(BASE10, suffix="L") + return str(x) + "L" @jit.elidable def str(self): - return self.format(BASE10) + try: + x = self.toint() + except OverflowError: + return self.format(BASE10) + return str(x) @jit.elidable def eq(self, other): From noreply at buildbot.pypy.org Wed Jul 17 23:04:26 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 23:04:26 +0200 (CEST) Subject: [pypy-commit] pypy default: woops, fix test Message-ID: <20130717210426.9811E1C021A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65450:0b3500b8e331 Date: 2013-07-17 21:27 +0200 http://bitbucket.org/pypy/pypy/changeset/0b3500b8e331/ Log: woops, fix test diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -518,9 +518,9 @@ def test_format_caching(self): big = rbigint.fromlong(2 ** 1000) - rbigint.pow = None res1 = big.str() oldpow = rbigint.__dict__['pow'] + rbigint.pow = None # make sure pow is not used the second time try: res2 = big.str() From noreply at buildbot.pypy.org Wed Jul 17 23:04:27 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 23:04:27 +0200 (CEST) Subject: [pypy-commit] pypy default: compute mindigits based on the base, instead of taking a worst-case number Message-ID: <20130717210427.D76841C021A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65451:02e7af864e43 Date: 2013-07-17 21:37 +0200 http://bitbucket.org/pypy/pypy/changeset/02e7af864e43/ Log: compute mindigits based on the base, instead of taking a worst-case number diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2060,21 +2060,35 @@ _FORMAT_MINDIGITS = 5 # 36 ** 5 fits in 32 bits, there may be a better choice for this + class _PartsCache(object): def __init__(self): # 36 - 3, because bases 0, 1 make no sense # and 2 is handled differently - self.parts_cache = [None] * 33 + self.parts_cache = [None] * 34 + self.mindigits = [0] * 34 + + for i in range(34): + base = i + 3 + mindigits = 1 + while base ** mindigits < sys.maxint: + mindigits += 1 + mindigits -= 1 + self.mindigits[i] = mindigits def get_cached_parts(self, base): - res = self.parts_cache[base - 3] + index = base - 3 + res = self.parts_cache[index] if res is None: rbase = rbigint.fromint(base) - part = rbase.pow(rbigint.fromint(_FORMAT_MINDIGITS)) + part = rbase.pow(rbigint.fromint(self.mindigits[index])) res = [part] self.parts_cache[base - 3] = res return res + def get_mindigits(self, base): + return self.mindigits[base - 3] + _parts_cache = _PartsCache() def _format_int(val, digits): @@ -2087,7 +2101,7 @@ return "".join(out) -def _format_recursive(x, i, output, pts, digits, size_prefix): +def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits): # bottomed out with min_digit sized pieces # use str of ints if i < 0: @@ -2098,12 +2112,12 @@ output.append(s) else: s = _format_int(x.toint(), digits) - output.append_multiple_char(digits[0], _FORMAT_MINDIGITS - len(s)) + output.append_multiple_char(digits[0], mindigits - len(s)) output.append(s) else: top, bot = x.divmod(pts[i]) # split the number - _format_recursive(top, i-1, output, pts, digits, size_prefix) - _format_recursive(bot, i-1, output, pts, digits, size_prefix) + _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits) 
+ _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits) def _format(x, digits, prefix='', suffix=''): if x.sign == 0: @@ -2119,7 +2133,8 @@ two = rbigint.fromint(2) pts = _parts_cache.get_cached_parts(base) - stringsize = _FORMAT_MINDIGITS + mindigits = _parts_cache.get_mindigits(base) + stringsize = mindigits startindex = 0 for startindex, part in enumerate(pts): if not part.lt(x): @@ -2140,7 +2155,7 @@ if negative: output.append('-') output.append(prefix) - _format_recursive(x, startindex, output, pts, digits, output.getlength()) + _format_recursive(x, startindex, output, pts, digits, output.getlength(), mindigits) output.append(suffix) return output.build() From noreply at buildbot.pypy.org Wed Jul 17 23:04:29 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 17 Jul 2013 23:04:29 +0200 (CEST) Subject: [pypy-commit] pypy default: somewhat annoying hack: if digits are the default base 10 ones, use a Message-ID: <20130717210429.1C5F01C021A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65452:dee9afbcb403 Date: 2013-07-17 21:45 +0200 http://bitbucket.org/pypy/pypy/changeset/dee9afbcb403/ Log: somewhat annoying hack: if digits are the default base 10 ones, use a specialized recursive function that calls the builtin RPython str method. Helps a lot, though. diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2091,7 +2091,7 @@ _parts_cache = _PartsCache() -def _format_int(val, digits): +def _format_int_general(val, digits): base = len(digits) out = [] while val: @@ -2100,8 +2100,11 @@ out.reverse() return "".join(out) +def _format_int10(val, digits): + return str(val) -def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits): + at specialize.arg(7) +def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int): # bottomed out with min_digit sized pieces # use str of ints if i < 0: @@ -2116,8 +2119,8 @@ output.append(s) else: top, bot = x.divmod(pts[i]) # split the number - _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits) - _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits) + _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int) + _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int) def _format(x, digits, prefix='', suffix=''): if x.sign == 0: @@ -2155,7 +2158,14 @@ if negative: output.append('-') output.append(prefix) - _format_recursive(x, startindex, output, pts, digits, output.getlength(), mindigits) + if digits == BASE10: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int10) + else: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int_general) output.append(suffix) return output.build() From noreply at buildbot.pypy.org Wed Jul 17 23:44:12 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 17 Jul 2013 23:44:12 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: try to please the annotator Message-ID: <20130717214412.7892B1C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65453:845e217b0aea Date: 2013-07-17 23:43 +0200 http://bitbucket.org/pypy/pypy/changeset/845e217b0aea/ Log: try to please the annotator diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ 
b/pypy/module/pypyjit/interp_jit.py @@ -4,6 +4,7 @@ """ from rpython.tool.pairtype import extendabletype +from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside from rpython.rlib import jit @@ -169,14 +170,18 @@ the JIT follow the call.''' return space.call_args(w_callable, __args__) - at unwrap_spec(w_code=PyCode, pos=int, value=int) + at unwrap_spec(w_code=PyCode, pos=r_uint, value=int) def set_local_threshold(space, w_code, pos, value): """ set_local_threshold(code, pos, value) For testing. Set the threshold for this code object at position pos at value given. """ - w_code.jit_cells[pos << 1] = r_uint(value) # we ignore the profiling case + from rpython.jit.metainterp.warmstate import JitCell + + ref = w_code.jit_cells[pos << 1] + jitcell = cast_base_ptr_to_instance(JitCell, ref) + jitcell.counter = value @unwrap_spec(w_code=PyCode, value=int) def set_local_bridge_threshold(space, w_code, value): From noreply at buildbot.pypy.org Thu Jul 18 08:31:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 08:31:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: prepare for fastpath for ptr_eq and fix slowpath of stm-barriers Message-ID: <20130718063112.A439D1C0651@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65454:ef39cd09001d Date: 2013-07-18 08:30 +0200 http://bitbucket.org/pypy/pypy/changeset/ef39cd09001d/ Log: prepare for fastpath for ptr_eq and fix slowpath of stm-barriers diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -89,6 +89,11 @@ self._build_b_slowpath(d, False) self._build_b_slowpath(d, True) self._build_b_slowpath(d, False, for_frame=True) + # only for stm: + if hasattr(gc_ll_descr, 'stm_ptr_eq_FUNCPTR'): + self._build_ptr_eq_slowpath() + else: + self.ptr_eq_slowpath = None # only one of those self.build_frame_realloc_slowpath() if self.cpu.supports_floats: diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -110,6 +110,8 @@ # the only ops with descrs that get recorded in a trace from rpython.jit.metainterp.history import AbstractDescr descr = op.getdescr() + if not we_are_translated() and descr is None: + return llref = cast_instance_to_gcref(descr) new_llref = rgc._make_sure_does_not_move(llref) new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, new_llref) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -173,12 +173,4 @@ return isinstance(box, ConstPtr) and not box.value def handle_ptr_eq(self, op): - if self._is_null(op.getarg(0)) or self._is_null(op.getarg(1)): - self.newops.append(op) - return - args = op.getarglist() - result = op.result - if op.getopnum() in (rop.PTR_EQ, rop.INSTANCE_PTR_EQ): - self._do_stm_call('stm_ptr_eq', args, result) - else: - self._do_stm_call('stm_ptr_ne', args, result) + self.newops.append(op) diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -517,7 +517,7 @@ jump(i1) """, """ [p1, p2] - i1 = call(ConstClass(stm_ptr_eq), p1, p2, descr=stm_ptr_eq_descr) + i1 = ptr_eq(p1, p2) jump(i1) """) @@ -528,7 +528,7 @@ jump(i1) """, """ [p1, p2] - i1 = call(ConstClass(stm_ptr_eq), p1, p2, descr=stm_ptr_eq_descr) + i1 = instance_ptr_eq(p1, p2) jump(i1) """) @@ -539,7 +539,7 @@ jump(i1) """, """ [p1, p2] - i1 = call(ConstClass(stm_ptr_ne), p1, p2, descr=stm_ptr_ne_descr) + i1 = ptr_ne(p1, p2) jump(i1) """) @@ -550,7 +550,7 @@ jump(i1) """, """ [p1, p2] - i1 = call(ConstClass(stm_ptr_ne), p1, p2, descr=stm_ptr_ne_descr) + i1 = instance_ptr_ne(p1, p2) jump(i1) """) diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -21,6 +21,7 @@ # | scratch | # | space | # +--------------------+ <== aligned to 16 bytes +# STACK TOP # All the rest of the data is in a GC-managed variable-size "frame". # This frame object's address is always stored in the register EBP/RBP. diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -309,6 +309,61 @@ rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.stack_check_slowpath = rawstart + + def _build_ptr_eq_slowpath(self): + cpu = self.cpu + is_stm = cpu.gc_ll_descr.stm + assert is_stm + + func = cpu.gc_ll_descr.get_malloc_fn_addr('stm_ptr_eq') + # + # This builds a helper function called from the slow path of + # ptr_eq/ne. It must save all registers, and optionally + # all XMM registers. It takes a single argument just pushed + # on the stack even on X86_64. It must restore stack alignment + # accordingly. + mc = codebuf.MachineCodeBlockWrapper() + # + self._push_all_regs_to_frame(mc, [], withfloats=False, + callee_only=True) + # + if IS_X86_32: + # ||val2|val1|retaddr| growing->, || aligned + mc.SUB_ri(esp.value, 5 * WORD) + # ||val2|val1|retaddr|x||x|x|x|x| + mc.MOV_rs(eax.value, 6 * WORD) + mc.MOV_rs(ecx.value, 7 * WORD) + # eax=val1, ecx=val2 + mc.MOV_sr(0, eax.value) + mc.MOV_sr(WORD, ecx.value) + # ||val2|val1|retaddr|x||x|x|val2|val1| + else: + # ||val2|val1||retaddr| + mc.SUB_ri(esp.value, WORD) + # ||val2|val1||retaddr|x|| + mc.MOV_rs(edi.value, 2 * WORD) + mc.MOV_rs(esi.value, 3 * WORD) + # + mc.CALL(imm(func)) + # eax has result + if IS_X86_32: + mc.ADD_ri(esp.value, 5 * WORD) + else: + mc.ADD_ri(esp.value, WORD) + # + # result in eax, save (not sure if necessary) + mc.PUSH_r(eax.value) + # + self._pop_all_regs_from_frame(mc, [], withfloats=False, + callee_only=True) + # + mc.POP_r(eax.value) + mc.RET16_i(2 * WORD) + + rawstart = mc.materialize(self.cpu.asmmemmgr, []) + self.ptr_eq_slowpath = rawstart + + def _build_b_slowpath(self, descr, withcards, withfloats=False, for_frame=False): is_stm = self.cpu.gc_ll_descr.stm @@ -370,8 +425,13 @@ mc.CALL(imm(func)) if descr.returns_modified_object: - # new addr in eax, save in scratch reg - mc.PUSH_r(eax.value) + # new addr in eax, save to now unused arg + if for_frame: + mc.PUSH_r(eax.value) + elif IS_X86_32: + mc.MOV_sr(3 * WORD, eax.value) + else: + mc.MOV_sr(WORD, eax.value) if withcards: # A final TEST8 before the RET, for the caller. 
Careful to @@ -392,20 +452,24 @@ self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) if descr.returns_modified_object: - mc.POP_r(eax.value) + if IS_X86_32: + mc.MOV_rs(eax.value, 3 * WORD) + else: + mc.MOV_rs(eax.value, WORD) mc.RET16_i(WORD) else: if IS_X86_32: - mc.MOV_rs(edx.value, 4 * WORD) - mc.MOVSD_xs(xmm0.value, 3 * WORD) - mc.MOV_rs(eax.value, WORD) # restore + mc.MOV_rs(edx.value, 5 * WORD) + mc.MOVSD_xs(xmm0.value, 4 * WORD) + mc.MOV_rs(eax.value, 2 * WORD) # restore self._restore_exception(mc, exc0, exc1) - mc.MOV(exc0, RawEspLoc(WORD * 5, REF)) - mc.MOV(exc1, RawEspLoc(WORD * 6, INT)) + mc.MOV(exc0, RawEspLoc(WORD * 6, REF)) + mc.MOV(exc1, RawEspLoc(WORD * 7, INT)) + + mc.POP_r(eax.value) # return value + mc.LEA_rs(esp.value, 7 * WORD) - if descr.returns_modified_object: - mc.POP_r(eax.value) mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -932,6 +996,43 @@ self.mc.LEA_rm(result_loc.value, (loc.value, delta)) return genop_binary_or_lea + + def genop_ptr_eq(self, op, arglocs, result_loc): + assert self.cpu.gc_ll_descr.stm + rl = result_loc.lowest8bits() + self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + self.mc.TEST_rr(eax.value, eax.value) + self.mc.SET_ir(rx86.Conditions['NZ'], rl.value) + self.mc.MOVZX8_rr(result_loc.value, rl.value) + + def genop_ptr_ne(self, op, arglocs, result_loc): + assert self.cpu.gc_ll_descr.stm + rl = result_loc.lowest8bits() + self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + self.mc.TEST_rr(eax.value, eax.value) + self.mc.SET_ir(rx86.Conditions['Z'], rl.value) + self.mc.MOVZX8_rr(result_loc.value, rl.value) + + def genop_guard_ptr_eq(self, op, guard_op, guard_token, + arglocs, result_loc): + guard_opnum = guard_op.getopnum() + self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + self.mc.TEST_rr(eax.value, eax.value) + if guard_opnum == rop.GUARD_FALSE: + self.implement_guard(guard_token, "Z") + else: + self.implement_guard(guard_token, "NZ") + + def genop_guard_ptr_ne(self, op, guard_op, guard_token, + arglocs, result_loc): + guard_opnum = guard_op.getopnum() + self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) + self.mc.TEST_rr(eax.value, eax.value) + if guard_opnum == rop.GUARD_FALSE: + self.implement_guard(guard_token, "NZ") + else: + self.implement_guard(guard_token, "Z") + def _cmpop(cond, rev_cond): def genop_cmp(self, op, arglocs, result_loc): rl = result_loc.lowest8bits() @@ -1079,8 +1180,8 @@ genop_int_ne = _cmpop("NE", "NE") genop_int_gt = _cmpop("G", "L") genop_int_ge = _cmpop("GE", "LE") - genop_ptr_eq = genop_instance_ptr_eq = genop_int_eq - genop_ptr_ne = genop_instance_ptr_ne = genop_int_ne + genop_instance_ptr_eq = genop_ptr_eq + genop_instance_ptr_ne = genop_ptr_ne genop_float_lt = _cmpop_float('B', 'A') genop_float_le = _cmpop_float('BE', 'AE') @@ -1100,8 +1201,8 @@ genop_guard_int_ne = _cmpop_guard("NE", "NE", "E", "E") genop_guard_int_gt = _cmpop_guard("G", "L", "LE", "GE") genop_guard_int_ge = _cmpop_guard("GE", "LE", "L", "G") - genop_guard_ptr_eq = genop_guard_instance_ptr_eq = genop_guard_int_eq - genop_guard_ptr_ne = genop_guard_instance_ptr_ne = genop_guard_int_ne + genop_guard_instance_ptr_eq = genop_guard_ptr_eq + genop_guard_instance_ptr_ne = genop_guard_ptr_ne genop_guard_uint_gt = _cmpop_guard("A", "B", "BE", "AE") genop_guard_uint_lt = _cmpop_guard("B", "A", "AE", "BE") @@ -1114,6 +1215,9 @@ genop_guard_float_gt = _cmpop_guard_float("A", "B", "BE","AE") genop_guard_float_ge = _cmpop_guard_float("AE","BE", "B", "A") + + + def genop_math_sqrt(self, op, arglocs, 
resloc): self.mc.SQRTSD(arglocs[0], resloc) @@ -2001,6 +2105,19 @@ self.mc.overwrite(jmp_location - 1, chr(offset)) # ------------------- END CALL ASSEMBLER ----------------------- + def _stm_ptr_eq_fastpath(self, mc, arglocs, result_loc): + assert self.cpu.gc_ll_descr.stm + assert self.ptr_eq_slowpath is not None + a_base = arglocs[0] + b_base = arglocs[1] + # + mc.PUSH(b_base) + mc.PUSH(a_base) + func = self.ptr_eq_slowpath + mc.CALL(imm(func)) + assert isinstance(result_loc, RegLoc) + mc.MOV_rr(result_loc.value, eax.value) + def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): assert self.cpu.gc_ll_descr.stm diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -7,6 +7,7 @@ ALWAYS_ALLOW_OPERATIONS = set([ 'force_cast', 'keepalive', 'cast_ptr_to_adr', + 'cast_adr_to_int', 'debug_print', 'debug_assert', 'cast_opaque_ptr', 'hint', 'stack_current', 'gc_stack_bottom', 'cast_current_ptr_to_int', # this variant of 'cast_ptr_to_int' is ok @@ -14,7 +15,7 @@ 'jit_force_quasi_immutable', 'jit_marker', 'jit_is_virtual', 'jit_record_known_class', 'gc_identityhash', 'gc_id', 'gc_can_move', 'gc__collect', - 'gc_adr_of_root_stack_top', + 'gc_adr_of_root_stack_top', 'gc_get_original_copy', 'stmgc_get_original_copy', 'weakref_create', 'weakref_deref', 'stm_threadlocalref_get', 'stm_threadlocalref_set', From noreply at buildbot.pypy.org Thu Jul 18 10:19:01 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 10:19:01 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: try to add weakrefs to demo_random.c Message-ID: <20130718081901.B8E7E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r408:e203655b8773 Date: 2013-07-18 10:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/e203655b8773/ Log: try to add weakrefs to demo_random.c diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -25,27 +25,46 @@ // SUPPORT #define GCTID_STRUCT_NODE 123 +#define GCTID_WEAKREF 122 + +struct node; +typedef struct node * nodeptr; +struct weak_node { + struct stm_object_s hdr; + nodeptr node; +}; +typedef struct weak_node * weaknodeptr; +#define WEAKNODE_SIZE sizeof(struct weak_node) struct node { struct stm_object_s hdr; long value; revision_t id; revision_t hash; - struct node *next; + nodeptr next; + weaknodeptr weakref; }; -typedef struct node * nodeptr; + + size_t stmcb_size(gcptr ob) { - assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); - return sizeof(struct node); + if (stm_get_tid(ob) == GCTID_STRUCT_NODE) + return sizeof(struct node); + else if (stm_get_tid(ob) == GCTID_WEAKREF) + return WEAKNODE_SIZE; + assert(0); } + void stmcb_trace(gcptr ob, void visit(gcptr *)) { nodeptr n; + if (stm_get_tid(ob) == GCTID_WEAKREF) + return; assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); n = (nodeptr)ob; visit((gcptr *)&n->next); + visit((gcptr *)&n->weakref); } @@ -105,6 +124,21 @@ return (int)(rand_r(&td.thread_seed) % (unsigned int)max); } +gcptr get_random_root() +{ + int num = get_rand(td.num_roots + 1); + if (num == 0) + return stm_thread_local_obj; + else + return td.roots[num - 1]; +} + +gcptr get_random_shared_root() +{ + int num = get_rand(SHARED_ROOTS); + return shared_roots[num]; +} + void copy_roots(gcptr *from, gcptr *to, int num) { int i; @@ -192,6 +226,27 @@ return r; } + +weaknodeptr allocate_weaknodeptr(nodeptr to) +{ + weaknodeptr w; + push_roots(1); + w = 
(weaknodeptr)stm_weakref_allocate(WEAKNODE_SIZE, GCTID_WEAKREF, + (gcptr)to); + pop_roots(1); + return w; +} + +void set_weakref(nodeptr n, nodeptr to) +{ + stm_push_root((gcptr)n); + weaknodeptr w = allocate_weaknodeptr(to); + n = (nodeptr)stm_pop_root(); + n = (nodeptr)stm_write_barrier((gcptr)n); + n->weakref = w; + dprintf(("set_weakref %p -> %p -> %p\n", n, w, to)); +} + int is_shared_prebuilt(gcptr p) { int i; @@ -448,6 +503,46 @@ return p; } +gcptr weakref_events(gcptr p, gcptr _r, gcptr _sr) +{ + nodeptr t; + weaknodeptr w; + gcptr ptrs[] = {_r, _sr}; + + int i = get_rand(2); + int k = get_rand(3); + switch (k) { + case 0: // check weakref + t = (nodeptr)read_barrier(ptrs[i]); + w = t->weakref; + if(w) { + if (w->node) { + assert(stm_get_tid((gcptr)w) == GCTID_WEAKREF); + check((gcptr)w->node); + return (gcptr)w->node; + } + else { + t->weakref = NULL; + } + } + p = NULL; + break; + case 1: // set weakref to something + if (p) + set_weakref((nodeptr)_r, (nodeptr)p); + else + set_weakref((nodeptr)_r, (nodeptr)get_random_root()); + p = NULL; + break; + case 2: // set weakref on shared roots + set_weakref((nodeptr)_sr, (nodeptr)get_random_shared_root()); + p = NULL; + break; + } + return p; +} + + gcptr shared_roots_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr w_sr; @@ -462,7 +557,7 @@ break; case 2: w_sr = (nodeptr)write_barrier(_sr); - w_sr->next = (nodeptr)shared_roots[get_rand(SHARED_ROOTS)]; + w_sr->next = (nodeptr)get_random_shared_root(); break; } return p; @@ -527,18 +622,12 @@ gcptr do_step(gcptr p) { gcptr _r, _sr; - int num, k; + int k; - num = get_rand(td.num_roots+1); - if (num == 0) - _r = stm_thread_local_obj; - else - _r = td.roots[num - 1]; - - num = get_rand(SHARED_ROOTS); - _sr = shared_roots[num]; + _r = get_random_root(); + _sr = get_random_shared_root(); - k = get_rand(9); + k = get_rand(11); check(p); assert(thread_descriptor->active); @@ -550,6 +639,8 @@ p = id_hash_events(p, _r, _sr); else if (k < 8) p = rare_events(p, _r, _sr); + else if (k < 10) + p = weakref_events(p, _r, _sr); else if (get_rand(20) == 1) { // transaction break fprintf(stdout, "|"); diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -5,11 +5,14 @@ gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) { + stm_push_root(obj); gcptr weakref = stm_allocate(size, tid); + obj = stm_pop_root(); assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? 
*/ assert(stmgc_size(weakref) == size); WEAKREF_PTR(weakref, size) = obj; gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref); + dprintf(("alloc weakref %p -> %p\n", weakref, obj)); return weakref; } From noreply at buildbot.pypy.org Thu Jul 18 11:05:06 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 18 Jul 2013 11:05:06 +0200 (CEST) Subject: [pypy-commit] pypy default: kill no longer needed constant Message-ID: <20130718090506.2B9051C14B6@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65455:99c8ce4a8899 Date: 2013-07-18 11:04 +0200 http://bitbucket.org/pypy/pypy/changeset/99c8ce4a8899/ Log: kill no longer needed constant diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -2058,8 +2058,6 @@ # hint for the annotator for the slice below) return ''.join(result[next_char_index:]) -_FORMAT_MINDIGITS = 5 # 36 ** 5 fits in 32 bits, there may be a better choice for this - class _PartsCache(object): def __init__(self): From noreply at buildbot.pypy.org Thu Jul 18 11:18:27 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 11:18:27 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: merge default Message-ID: <20130718091827.B01691C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65456:fc27f85eb8d9 Date: 2013-07-18 11:17 +0200 http://bitbucket.org/pypy/pypy/changeset/fc27f85eb8d9/ Log: merge default diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,18 @@ .. branch: flowoperators Simplify rpython/flowspace/ code by using more metaprogramming. Create SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. + +.. branch: ssl_moving_write_buffer diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,19 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +80,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,35 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return 
space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,96 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxint*2+1) + if n > sys.maxint: + n -= 2*(sys.maxint+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxint) == sys.maxint + assert self.intmask(sys.maxint+1) == -sys.maxint-1 + assert self.intmask(-sys.maxint-2) == sys.maxint + N = 2 ** 128 + assert self.intmask(N+sys.maxint) == sys.maxint + assert self.intmask(N+sys.maxint+1) == -sys.maxint-1 + assert self.intmask(N-sys.maxint-2) == sys.maxint + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 + assert intop.int_add(-2, -sys.maxint) == sys.maxint + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 + assert intop.int_sub(-2, sys.maxint) == sys.maxint + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxint, -sys.maxint) == ( + self.intmask(sys.maxint ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint + assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxint, -1) == 0 + assert intop.int_mod(sys.maxint, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxint-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxint-1 + assert intop.int_lshift(sys.maxint // 3, 2) == ( + self.intmask((sys.maxint // 3) << 2)) + assert intop.int_lshift(-sys.maxint // 3, 2) == ( + self.intmask((-sys.maxint // 3) << 2)) + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) 
== 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) == 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxint + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2760,6 +2760,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. + libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1050,7 +1050,7 @@ else: # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" data_w = space.listview(space.call_function(w_method)) - update1_keys(space, w_dict, data_w) + update1_keys(space, w_dict, w_data, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -1074,7 +1074,7 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, data_w): +def update1_keys(space, w_dict, w_data, data_w): for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py 
--- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -367,6 +367,16 @@ d.update({'foo': 'bar'}, baz=1) assert d == {'foo': 'bar', 'baz': 1} + def test_update_keys_method(self): + class Foo(object): + def keys(self): + return [4, 1] + def __getitem__(self, key): + return key * 10 + d = {} + d.update(Foo()) + assert d == {1: 10, 4: 40} + def test_values(self): d = {1: 2, 3: 4} vals = d.values() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,7 +740,8 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) - def test_union_type_some_opbc(self): + def test_union_type_some_pbc(self): + py.test.skip("is there a point? f() can return self.__class__ instead") class A(object): name = "A" diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -23,7 +23,6 @@ supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode supports_singlefloats = not detect_hardfloat() - can_inline_varsize_malloc = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -21,8 +21,6 @@ class AbstractLLCPU(AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts - can_inline_varsize_malloc = False - def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): assert type(opts) is not bool diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -305,8 +305,6 @@ arraydescr, kind=FLAG_ARRAY): """ itemsize is an int, v_length and v_result are boxes """ - if not self.cpu.can_inline_varsize_malloc: - return False # temporary, kill when ARM supports it gc_descr = self.gc_ll_descr if (kind == FLAG_ARRAY and (arraydescr.basesize != gc_descr.standard_array_basesize or diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -109,8 +109,6 @@ class BaseFakeCPU(object): JITFRAME_FIXED_SIZE = 0 - can_inline_varsize_malloc = True - def __init__(self): self.tracker = FakeTracker() self._cache = {} diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -23,8 +23,6 @@ with_threads = False frame_reg = regloc.ebp - can_inline_varsize_malloc = True - from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE all_reg_indexes = gpr_reg_mgr_cls.all_reg_indexes gen_regs = gpr_reg_mgr_cls.all_regs diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -7,6 +7,7 @@ from rpython.rlib import rgc from rpython.rlib.jit import elidable, oopspec from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask +from rpython.rlib.rarithmetic import 
LONG_BIT from rpython.rtyper import rlist from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.extregistry import ExtRegistryEntry @@ -272,10 +273,9 @@ return result def _ll_1_int_abs(x): - if x < 0: - return -x - else: - return x + # this version doesn't branch + mask = x >> (LONG_BIT - 1) + return (x ^ mask) - mask def _ll_1_cast_uint_to_float(x): # XXX on 32-bit platforms, this should be done using cast_longlong_to_float diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -13,6 +13,7 @@ self.ARGS = ARGS self.RESULT = RESULT self.effectinfo = effectinfo + def get_extra_info(self): return self.effectinfo @@ -37,7 +38,7 @@ class tracker: pass - + calldescrof = FakeCallDescr fielddescrof = FakeFieldDescr sizeof = FakeSizeDescr @@ -121,20 +122,32 @@ blackholeinterp.run() assert blackholeinterp.get_tmpreg_i() == 100+6+5+4+3 + def test_instantiate(): - class A1: id = 651 - class A2(A1): id = 652 - class B1: id = 661 - class B2(B1): id = 662 + class A1: + id = 651 + + class A2(A1): + id = 652 + + class B1: + id = 661 + + class B2(B1): + id = 662 + def dont_look(n): return n + 1 + + classes = [ + (A1, B1), + (A2, B2) + ] + def f(n): - if n > 5: - x, y = A1, B1 - else: - x, y = A2, B2 + x, y = classes[n] return x().id + y().id + dont_look(n) - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [0]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(FakePolicy()) @@ -149,16 +162,10 @@ else: assert 0, "missing instantiate_*_%s in:\n%r" % (expected, names) - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'B1' in names - assert 'A2' in names - assert 'B2' in names + names = set([value for key, value in cw.assembler.list_of_addr2name]) assert 'dont_look' in names + def test_instantiate_with_unreasonable_attr(): # It is possible to have in real code the instantiate() function for # a class be dont-look-inside. 
This is caused by the code that @@ -169,17 +176,19 @@ name = graph.name return not (name.startswith('instantiate_') and name.endswith('A2')) + class A1: pass + class A2(A1): pass + + classes = [A1, A2] + def f(n): - if n > 5: - x = A1 - else: - x = A2 + x = classes[n] x() - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [1]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(MyFakePolicy()) @@ -188,12 +197,7 @@ names = [jitcode.name for jitcode in cw.assembler.indirectcalltargets] assert len(names) == 1 assert names[0].startswith('instantiate_') and names[0].endswith('A1') - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'A2' in names + def test_int_abs(): def f(n): @@ -209,7 +213,7 @@ def test_raw_malloc_and_access(): TP = rffi.CArray(lltype.Signed) - + def f(n): a = lltype.malloc(TP, n, flavor='raw') a[0] = n diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,15 +1,22 @@ +import pytest + +from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, + EffectInfo, VirtualizableAnalyzer) +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.rclass import OBJECT -from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.ootypesystem import ootype -from rpython.jit.codewriter.effectinfo import effectinfo_from_writeanalyze,\ - EffectInfo +from rpython.translator.translator import TranslationContext, graphof -class FakeCPU: + +class FakeCPU(object): def fielddescrof(self, T, fieldname): return ('fielddescr', T, fieldname) + def arraydescrof(self, A): return ('arraydescr', A) + def test_no_oopspec_duplicate(): # check that all the various EffectInfo.OS_* have unique values oopspecs = set() @@ -18,6 +25,7 @@ assert value not in oopspecs oopspecs.add(value) + def test_include_read_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) @@ -26,6 +34,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) @@ -34,6 +43,7 @@ assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) @@ -43,6 +53,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) @@ -51,6 +62,7 @@ assert not effectinfo.write_descrs_fields assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + def test_dont_include_read_and_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a"), @@ -60,6 +72,7 @@ assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] assert not effectinfo.write_descrs_arrays + def test_dont_include_read_and_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A)), @@ -78,6 +91,7 @@ assert not 
effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -85,6 +99,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -92,6 +107,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_ooarray_of_void(): effects = frozenset([("array", ootype.Array(ootype.Void))]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -99,9 +115,43 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_instance_with_void(): effects = frozenset([("struct", ootype.Instance("x", ootype.ROOT, {"a": ootype.Void}), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + + +class TestVirtualizableAnalyzer(object): + def analyze(self, func, sig): + t = TranslationContext() + t.buildannotator().build_types(func, sig) + t.buildrtyper().specialize() + fgraph = graphof(t, func) + return VirtualizableAnalyzer(t).analyze(fgraph.startblock.operations[0]) + + def test_constructor(self): + class A(object): + x = 1 + + class B(A): + x = 2 + + @jit.elidable + def g(cls): + return cls() + + def f(x): + if x: + cls = A + else: + cls = B + return g(cls).x + + def entry(x): + return f(x) + + res = self.analyze(entry, [int]) + assert not res diff --git a/rpython/jit/codewriter/test/test_support.py b/rpython/jit/codewriter/test/test_support.py --- a/rpython/jit/codewriter/test/test_support.py +++ b/rpython/jit/codewriter/test/test_support.py @@ -1,8 +1,9 @@ -import py +import py, sys from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import llstr from rpython.flowspace.model import Variable, Constant, SpaceOperation from rpython.jit.codewriter.support import decode_builtin_call, LLtypeHelpers +from rpython.jit.codewriter.support import _ll_1_int_abs def newconst(x): return Constant(x, lltype.typeOf(x)) @@ -133,3 +134,12 @@ py.test.raises(IndexError, func, p1, llstr("w")) py.test.raises(AttributeError, func, p1, llstr(None)) py.test.raises(AttributeError, func, llstr(None), p2) + +def test_int_abs(): + assert _ll_1_int_abs(0) == 0 + assert _ll_1_int_abs(1) == 1 + assert _ll_1_int_abs(10) == 10 + assert _ll_1_int_abs(sys.maxint) == sys.maxint + assert _ll_1_int_abs(-1) == 1 + assert _ll_1_int_abs(-10) == 10 + assert _ll_1_int_abs(-sys.maxint) == sys.maxint diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -454,11 +454,19 @@ @jit.elidable def repr(self): - return self.format(BASE10, suffix="L") + try: + x = self.toint() + except OverflowError: + return self.format(BASE10, suffix="L") + return str(x) + "L" @jit.elidable def str(self): - return self.format(BASE10) + try: + x = self.toint() + except OverflowError: + return self.format(BASE10) + return str(x) @jit.elidable def eq(self, other): @@ -2050,9 +2058,38 @@ # hint for the annotator for the slice below) return ''.join(result[next_char_index:]) -_FORMAT_MINDIGITS = 5 # 36 ** 5 fits in 32 bits, there may be a better 
choice for this -def _format_int(val, digits): +class _PartsCache(object): + def __init__(self): + # 36 - 3, because bases 0, 1 make no sense + # and 2 is handled differently + self.parts_cache = [None] * 34 + self.mindigits = [0] * 34 + + for i in range(34): + base = i + 3 + mindigits = 1 + while base ** mindigits < sys.maxint: + mindigits += 1 + mindigits -= 1 + self.mindigits[i] = mindigits + + def get_cached_parts(self, base): + index = base - 3 + res = self.parts_cache[index] + if res is None: + rbase = rbigint.fromint(base) + part = rbase.pow(rbigint.fromint(self.mindigits[index])) + res = [part] + self.parts_cache[base - 3] = res + return res + + def get_mindigits(self, base): + return self.mindigits[base - 3] + +_parts_cache = _PartsCache() + +def _format_int_general(val, digits): base = len(digits) out = [] while val: @@ -2061,26 +2098,27 @@ out.reverse() return "".join(out) +def _format_int10(val, digits): + return str(val) -def _format_recursive(x, i, output, pts, digits, size_prefix): + at specialize.arg(7) +def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int): # bottomed out with min_digit sized pieces # use str of ints if i < 0: # this checks whether any digit has been appended yet if output.getlength() == size_prefix: - if x.sign == 0: - pass - else: + if x.sign != 0: s = _format_int(x.toint(), digits) output.append(s) else: s = _format_int(x.toint(), digits) - output.append_multiple_char(digits[0], _FORMAT_MINDIGITS - len(s)) + output.append_multiple_char(digits[0], mindigits - len(s)) output.append(s) else: top, bot = x.divmod(pts[i]) # split the number - _format_recursive(top, i-1, output, pts, digits, size_prefix) - _format_recursive(bot, i-1, output, pts, digits, size_prefix) + _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int) + _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int) def _format(x, digits, prefix='', suffix=''): if x.sign == 0: @@ -2095,22 +2133,42 @@ rbase = rbigint.fromint(base) two = rbigint.fromint(2) - pts = [rbase.pow(rbigint.fromint(_FORMAT_MINDIGITS))] - stringsize = _FORMAT_MINDIGITS - while pts[-1].lt(x): - pts.append(pts[-1].pow(two)) - stringsize *= 2 - pts.pop() # remove first base**2**i greater than x + pts = _parts_cache.get_cached_parts(base) + mindigits = _parts_cache.get_mindigits(base) + stringsize = mindigits + startindex = 0 + for startindex, part in enumerate(pts): + if not part.lt(x): + break + stringsize *= 2 # XXX can this overflow on 32 bit? 
+ else: + # not enough parts computed yet + while pts[-1].lt(x): + pts.append(pts[-1].pow(two)) + stringsize *= 2 + + startindex = len(pts) - 1 + + # remove first base**2**i greater than x + startindex -= 1 output = StringBuilder(stringsize) if negative: output.append('-') output.append(prefix) - _format_recursive(x, len(pts)-1, output, pts, digits, output.getlength()) + if digits == BASE10: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int10) + else: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int_general) output.append(suffix) return output.build() + def _bitwise(a, op, b): # '&', '|', '^' """ Bitwise and/or/xor operations """ diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -93,6 +93,7 @@ SSL_RECEIVED_SHUTDOWN = rffi_platform.ConstantInteger( "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -515,7 +515,19 @@ assert x.format('.!') == ( '-!....!!..!!..!.!!.!......!...!...!!!........!') assert x.format('abcdefghijkl', '<<', '>>') == '-<>' - + + def test_format_caching(self): + big = rbigint.fromlong(2 ** 1000) + res1 = big.str() + oldpow = rbigint.__dict__['pow'] + rbigint.pow = None + # make sure pow is not used the second time + try: + res2 = big.str() + assert res2 == res1 + finally: + rbigint.pow = oldpow + def test_overzelous_assertion(self): a = rbigint.fromlong(-1<<10000) b = rbigint.fromlong(-1<<3000) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -1,25 +1,20 @@ import types -import sys -from rpython.tool.pairtype import pairtype, pair -from rpython.annotator import model as annmodel -from rpython.annotator import description -from rpython.flowspace.model import Constant, Variable -from rpython.rtyper.lltypesystem.lltype import \ - typeOf, Void, ForwardReference, Struct, Bool, Char, \ - Ptr, malloc, nullptr, Array, Signed, FuncType -from rpython.rtyper.rmodel import Repr, TyperError, inputconst, inputdesc -from rpython.rtyper.rpbc import samesig,\ - commonbase, allattributenames, adjust_shape, \ - AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, \ - AbstractMultipleFrozenPBCRepr, MethodOfFrozenPBCRepr, \ - AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, \ - SingleFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable + +from rpython.annotator import description, model as annmodel +from rpython.rlib.debug import ll_assert +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper import callparse from rpython.rtyper.lltypesystem import rclass, llmemory -from rpython.tool.sourcetools import has_varargs -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.debug import ll_assert +from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, + Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) +from 
rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, + OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, + AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, + SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, + get_concrete_calltable) +from rpython.tool.pairtype import pairtype -from rpython.rtyper import callparse def rtype_is_None(robj1, rnone2, hop, pos=0): if isinstance(robj1.lowleveltype, Ptr): @@ -41,6 +36,7 @@ else: raise TyperError('rtype_is_None of %r' % (robj1)) + # ____________________________________________________________ class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): @@ -67,7 +63,7 @@ mangled_name, r_value = self.fieldmap[attr] cmangledname = inputconst(Void, mangled_name) return llops.genop('getfield', [vpbc, cmangledname], - resulttype = r_value) + resulttype=r_value) class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): @@ -86,6 +82,7 @@ def null_instance(self): return llmemory.Address._defl() + class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr), pairtype(MultipleUnrelatedFrozenPBCRepr, @@ -100,11 +97,13 @@ vlist = hop.inputargs(r, r) return hop.genop('adr_eq', vlist, resulttype=Bool) + class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr)): def convert_from_to((robj1, robj2), v, llops): return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) + # ____________________________________________________________ class FunctionsPBCRepr(AbstractFunctionsPBCRepr): @@ -123,6 +122,7 @@ def get_specfunc_row(self, llop, v, c_rowname, resulttype): return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) + class SmallFunctionSetPBCRepr(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper @@ -252,15 +252,6 @@ return hop.genop('char_ne', [v1, inputconst(Char, '\000')], resulttype=Bool) -## def rtype_simple_call(self, hop): -## v_index = hop.inputarg(self, arg=0) -## v_ptr = hop.llops.convertvar(v_index, self, self.pointer_repr) -## hop2 = hop.copy() -## hop2.args_r[0] = self.pointer_repr -## hop2.args_v[0] = v_ptr -## return hop2.dispatch() - -## rtype_call_args = rtype_simple_call class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): @@ -273,6 +264,7 @@ return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], resulttype=r_ptr.lowleveltype) + def compression_function(r_set): if r_set._compression_function is None: table = [] @@ -280,6 +272,7 @@ table.append((chr(i), p)) last_c, last_p = table[-1] unroll_table = unrolling_iterable(table[:-1]) + def ll_compress(fnptr): for c, p in unroll_table: if fnptr == p: @@ -290,6 +283,7 @@ r_set._compression_function = ll_compress return r_set._compression_function + class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_ptr, r_set), v, llops): if r_ptr.lowleveltype is Void: @@ -299,6 +293,7 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) + def conversion_table(r_from, r_to): if r_to in r_from._conversion_tables: return r_from._conversion_tables[r_to] @@ -320,7 +315,6 @@ r_from._conversion_tables[r_to] = r return r -## myf = open('convlog.txt', 'w') class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_from, r_to), v, llops): @@ -343,6 +337,7 @@ else: return v + class 
MethodsPBCRepr(AbstractMethodsPBCRepr): """Representation selected for a PBC of the form {func: classdef...}. It assumes that all the methods come from the same name in a base @@ -394,9 +389,12 @@ # no __init__ here, AbstractClassesPBCRepr.__init__ is good enough def _instantiate_runtime_class(self, hop, vtypeptr, r_instance): - from rpython.rtyper.lltypesystem.rbuiltin import ll_instantiate - v_inst1 = hop.gendirectcall(ll_instantiate, vtypeptr) - return hop.genop('cast_pointer', [v_inst1], resulttype = r_instance) + v_instantiate = hop.genop('getfield', [vtypeptr, hop.inputconst(Void, "instantiate")], resulttype=vtypeptr.concretetype.TO.instantiate) + possible_graphs = hop.inputconst(Void, + [desc.getclassdef(None).my_instantiate_graph for desc in self.s_pbc.descriptions] + ) + v_inst = hop.genop('indirect_call', [v_instantiate, possible_graphs], resulttype=vtypeptr.concretetype.TO.instantiate.TO.RESULT) + return hop.genop('cast_pointer', [v_inst], resulttype=r_instance) def getlowleveltype(self): return rclass.CLASSTYPE @@ -415,17 +413,3 @@ return 0 else: return cls.hash - -# ____________________________________________________________ - -##def rtype_call_memo(hop): -## memo_table = hop.args_v[0].value -## if memo_table.s_result.is_constant(): -## return hop.inputconst(hop.r_result, memo_table.s_result.const) -## fieldname = memo_table.fieldname -## assert hop.nb_args == 2, "XXX" - -## r_pbc = hop.args_r[1] -## assert isinstance(r_pbc, (MultipleFrozenPBCRepr, ClassesPBCRepr)) -## v_table, v_pbc = hop.inputargs(Void, r_pbc) -## return r_pbc.getfield(v_pbc, fieldname, hop.llops) diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -32,22 +32,22 @@ def collect_called_graphs(graph, translator, include_oosend=True): - graphs_or_something = {} + graphs_or_something = set() for block in graph.iterblocks(): for op in block.operations: if op.opname == "direct_call": graph = get_graph(op.args[0], translator) if graph is not None: - graphs_or_something[graph] = True + graphs_or_something.add(graph) else: - graphs_or_something[op.args[0]] = True + graphs_or_something.add(op.args[0]) if op.opname == "indirect_call": graphs = op.args[-1].value if graphs is None: - graphs_or_something[op.args[0]] = True + graphs_or_something.add(op.args[0]) else: for graph in graphs: - graphs_or_something[graph] = True + graphs_or_something.add(graph) if op.opname == 'oosend' and include_oosend: meth = get_meth_from_oosend(op) if hasattr(meth, 'graph'): @@ -56,7 +56,7 @@ key = CanRaise(meth._can_raise) else: key = op.args[0] - graphs_or_something[key] = True + graphs_or_something.add(key) return graphs_or_something def iter_callsites(graph, calling_what): From noreply at buildbot.pypy.org Thu Jul 18 12:21:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 12:21:20 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: hack differently Message-ID: <20130718102120.2D2C91C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65457:9970eb753fab Date: 2013-07-18 12:20 +0200 http://bitbucket.org/pypy/pypy/changeset/9970eb753fab/ Log: hack differently diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -6,7 +6,8 @@ from rpython.tool.pairtype import extendabletype from rpython.rtyper.annlowlevel import 
cast_base_ptr_to_instance from rpython.rlib.rarithmetic import r_uint, intmask -from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside +from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside,\ + BaseJitCell from rpython.rlib import jit from rpython.rlib.jit import current_trace_length, unroll_parameters import pypy.interpreter.pyopcode # for side-effects @@ -177,10 +178,8 @@ For testing. Set the threshold for this code object at position pos at value given. """ - from rpython.jit.metainterp.warmstate import JitCell - ref = w_code.jit_cells[pos << 1] - jitcell = cast_base_ptr_to_instance(JitCell, ref) + jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) jitcell.counter = value @unwrap_spec(w_code=PyCode, value=int) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -684,7 +684,7 @@ # Annotation and rtyping of some of the JitDriver methods class BaseJitCell(object): - __slots__ = () + __slots__ = ('counter') class ExtEnterLeaveMarker(ExtRegistryEntry): From noreply at buildbot.pypy.org Thu Jul 18 12:40:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 12:40:16 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: start hacking on list_resize_ge Message-ID: <20130718104016.3D1891C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65458:f8d06bf178e9 Date: 2013-07-18 12:39 +0200 http://bitbucket.org/pypy/pypy/changeset/f8d06bf178e9/ Log: start hacking on list_resize_ge diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,9 +1,10 @@ from rpython.rlib.rarithmetic import ovfcheck -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import llmemory from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from rpython.jit.metainterp.history import JitCellToken @@ -78,6 +79,11 @@ if op.getopnum() == rop.CALL_ASSEMBLER: self.handle_call_assembler(op) continue + if op.getopnum() == rop.CALL: + idx = op.getdescr().get_extra_info().oopspecindex + if idx == EffectInfo.OS_LIST_RESIZE_GE: + self.handle_list_resize_ge(op) + continue # self.newops.append(op) return self.newops @@ -118,6 +124,12 @@ else: self.gen_malloc_fixedsize(size, descr.tid, op.result) + def handle_list_resize_ge(self, op): + """ what we want to do is to check the length and than add a conditional + call to really resize + """ + xxx + def handle_new_array(self, arraydescr, op, kind=FLAG_ARRAY): v_length = op.getarg(0) total_size = -1 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -773,3 +773,6 @@ setarrayitem_gc(p1, 1, f0, descr=floatframedescr) i3 = call_assembler(p1, descr=casmdescr) """) + + def test_rewrite_list_resize_ge(self): + pass diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- 
a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -85,10 +85,14 @@ OS_JIT_FORCE_VIRTUAL = 120 + OS_LIST_RESIZE_GE = 130 + OS_LIST_RESIZE_LE = 130 + # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, - OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, + OS_JIT_FORCE_VIRTUAL, OS_SHRINK_ARRAY, OS_LIST_RESIZE_GE, + OS_LIST_RESIZE_LE, ]) def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -359,11 +359,12 @@ else: raise AssertionError(kind) lst.append(v) - def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False): + def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False, + oopspecindex=EffectInfo.OS_NONE): """A direct_call turns into the operation 'residual_call_xxx' if it is calling a function that we don't want to JIT. The initial args of 'residual_call_xxx' are the function to call, and its calldescr.""" - calldescr = self.callcontrol.getcalldescr(op) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex) op1 = self.rewrite_call(op, 'residual_call', [op.args[0]] + extraargs, calldescr=calldescr) if may_call_jitcodes or self.callcontrol.calldescr_canraise(calldescr): @@ -1616,6 +1617,15 @@ do_resizable_void_list_getitem_foldable = do_resizable_void_list_getitem do_resizable_void_list_setitem = do_resizable_void_list_getitem + def do_resizable_list__resize_ge(self, op, args, *descrs): + index = EffectInfo.OS_LIST_RESIZE_GE + op1 = self.handle_residual_call(op, oopspecindex=index)[0] + LIST = args[0].concretetype.TO + lengthdescr = self.cpu.fielddescrof(LIST, 'length') + arraydescr = self.cpu.arraydescrof(LIST.items.TO) + op1.args += [lengthdescr, arraydescr] + return [op1, SpaceOperation('-live-', [], None)] + # ---------- # Strings and Unicodes. From noreply at buildbot.pypy.org Thu Jul 18 12:45:19 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 12:45:19 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: it was supposed to be like that Message-ID: <20130718104519.C21681C021A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65459:f9ebe56a74dc Date: 2013-07-18 12:44 +0200 http://bitbucket.org/pypy/pypy/changeset/f9ebe56a74dc/ Log: it was supposed to be like that diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1619,12 +1619,12 @@ def do_resizable_list__resize_ge(self, op, args, *descrs): index = EffectInfo.OS_LIST_RESIZE_GE - op1 = self.handle_residual_call(op, oopspecindex=index)[0] + oplist = self.handle_residual_call(op, oopspecindex=index)[0] LIST = args[0].concretetype.TO lengthdescr = self.cpu.fielddescrof(LIST, 'length') arraydescr = self.cpu.arraydescrof(LIST.items.TO) - op1.args += [lengthdescr, arraydescr] - return [op1, SpaceOperation('-live-', [], None)] + oplist[0].args += [lengthdescr, arraydescr] + return oplist # ---------- # Strings and Unicodes. 
From noreply at buildbot.pypy.org Thu Jul 18 14:46:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 14:46:00 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: implementing immutables and trying to fix the stealing of weakrefs Message-ID: <20130718124600.0DE9D1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r409:1cf2347f286d Date: 2013-07-18 14:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/1cf2347f286d/ Log: implementing immutables and trying to fix the stealing of weakrefs diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -516,8 +516,8 @@ t = (nodeptr)read_barrier(ptrs[i]); w = t->weakref; if(w) { + assert(stm_get_tid((gcptr)w) == GCTID_WEAKREF); if (w->node) { - assert(stm_get_tid((gcptr)w) == GCTID_WEAKREF); check((gcptr)w->node); return (gcptr)w->node; } diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -545,6 +545,7 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -72,6 +72,8 @@ static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; +static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; + /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -89,6 +91,8 @@ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ + "HAS_ID", \ + "IMMUTABLE", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -101,6 +101,13 @@ return P; } +gcptr stm_allocate_immutable(size_t size, unsigned long tid) +{ + gcptr P = stm_allocate(size, tid); + P->h_tid |= GCFLAG_IMMUTABLE; + return P; +} + gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -21,9 +21,47 @@ { gcptr stub, obj = *pobj; if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) == - (GCFLAG_PUBLIC | GCFLAG_OLD)) + (GCFLAG_PUBLIC | GCFLAG_OLD)) return; + if (obj->h_tid & GCFLAG_IMMUTABLE) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + if (!(obj->h_tid & GCFLAG_OLD)) { + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + + dprintf(("steal prot immutable -> public: %p | %p\n", obj, O)); + stub = obj; + goto done; + } + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + } + return; + } + /* we use 'all_stubs', a dictionary, in order to try to avoid duplicate stubs for the same object. 
XXX maybe it would be better to use a fast approximative cache that stays around for diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -29,6 +29,9 @@ /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); +/* allocate an object that is be immutable. it cannot be changed with + a stm_write_barrier() or after the next commit */ +gcptr stm_allocate_immutable(size_t size, unsigned long tid); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -6,7 +6,7 @@ gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) { stm_push_root(obj); - gcptr weakref = stm_allocate(size, tid); + gcptr weakref = stm_allocate_immutable(size, tid); obj = stm_pop_root(); assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? */ assert(stmgc_size(weakref) == size); From noreply at buildbot.pypy.org Thu Jul 18 14:50:07 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 14:50:07 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: typo that doesn't change much Message-ID: <20130718125007.DBDF71C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r410:212e6e027030 Date: 2013-07-18 14:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/212e6e027030/ Log: typo that doesn't change much diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -53,7 +53,7 @@ write_barriers allowed. */ dprintf(("steal prot immutable -> public: %p | %p\n", obj, O)); - stub = obj; + stub = O; goto done; } dprintf(("prot immutable -> public: %p\n", obj)); From noreply at buildbot.pypy.org Thu Jul 18 16:12:11 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 16:12:11 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: clean it up a bit Message-ID: <20130718141211.AEDEE1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r411:ff8c751610ca Date: 2013-07-18 15:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/ff8c751610ca/ Log: clean it up a bit diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -26,39 +26,47 @@ if (obj->h_tid & GCFLAG_IMMUTABLE) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public */ + assert(obj->h_tid & GCFLAG_NURSERY_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + goto done; + } + /* old or young protected! mark as PUBLIC */ - if (!(obj->h_tid & GCFLAG_PUBLIC)) { - if (!(obj->h_tid & GCFLAG_OLD)) { - gcptr O; + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); - if (obj->h_tid & GCFLAG_HAS_ID) { - /* use id-copy for us */ - O = (gcptr)obj->h_original; - obj->h_tid &= ~GCFLAG_HAS_ID; - stm_copy_to_old_id_copy(obj, O); - O->h_original = 0; - } else { - O = stmgc_duplicate_old(obj); - - /* young and without original? */ - if (!(obj->h_original)) - obj->h_original = (revision_t)O; - } - obj->h_revision = (revision_t)O; - - O->h_tid |= GCFLAG_PUBLIC; - obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); - /* here it is fine if it stays in read caches because - the object is immutable anyway and there are no - write_barriers allowed. 
*/ - - dprintf(("steal prot immutable -> public: %p | %p\n", obj, O)); - stub = O; - goto done; + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; } - dprintf(("prot immutable -> public: %p\n", obj)); - obj->h_tid |= GCFLAG_PUBLIC; + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + return; } From noreply at buildbot.pypy.org Thu Jul 18 16:12:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 18 Jul 2013 16:12:12 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: and there was a bug in demo_random Message-ID: <20130718141212.B306C1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r412:324c3f18bbad Date: 2013-07-18 16:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/324c3f18bbad/ Log: and there was a bug in demo_random diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -506,7 +506,7 @@ gcptr weakref_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr t; - weaknodeptr w; + weaknodeptr w, ww; gcptr ptrs[] = {_r, _sr}; int i = get_rand(2); @@ -516,10 +516,11 @@ t = (nodeptr)read_barrier(ptrs[i]); w = t->weakref; if(w) { - assert(stm_get_tid((gcptr)w) == GCTID_WEAKREF); - if (w->node) { - check((gcptr)w->node); - return (gcptr)w->node; + ww = stm_read_barrier(w); + assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF); + if (ww->node) { + check((gcptr)ww->node); + return (gcptr)ww->node; } else { t->weakref = NULL; diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -18,7 +18,6 @@ /***** Minor collection *****/ - static int is_in_nursery(struct tx_descriptor *d, gcptr obj) { return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); From noreply at buildbot.pypy.org Thu Jul 18 17:14:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 17:14:19 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: Comment Message-ID: <20130718151419.49E331C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r413:de1a136dd7eb Date: 2013-07-18 17:14 +0200 http://bitbucket.org/pypy/stmgc/changeset/de1a136dd7eb/ Log: Comment diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -115,7 +115,9 @@ /* weakref support: allocate a weakref object, and set it to point weakly to 'obj'. The weak pointer offset is hard-coded to be at - 'size - WORD'. Important: stmcb_trace() must NOT trace it. */ + 'size - WORD'. Important: stmcb_trace() must NOT trace it. + Weakrefs are *immutable*! Don't attempt to use stm_write_barrier() + on them. 
*/ gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); From noreply at buildbot.pypy.org Thu Jul 18 18:18:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 18:18:06 +0200 (CEST) Subject: [pypy-commit] cffi default: I think that this should clean up the (relative) mess of the GC on Message-ID: <20130718161806.86FF61C1536@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1279:3751a1489e7b Date: 2013-07-18 18:12 +0200 http://bitbucket.org/cffi/cffi/changeset/3751a1489e7b/ Log: I think that this should clean up the (relative) mess of the GC on some-but-not-all cdata objects. The test still doesn't pass though. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -144,11 +144,14 @@ static PyTypeObject CField_Type; static PyTypeObject CData_Type; static PyTypeObject CDataOwning_Type; +static PyTypeObject CDataOwningGC_Type; #define CTypeDescr_Check(ob) (Py_TYPE(ob) == &CTypeDescr_Type) #define CData_Check(ob) (Py_TYPE(ob) == &CData_Type || \ - Py_TYPE(ob) == &CDataOwning_Type) -#define CDataOwn_Check(ob) (Py_TYPE(ob) == &CDataOwning_Type) + Py_TYPE(ob) == &CDataOwning_Type || \ + Py_TYPE(ob) == &CDataOwningGC_Type) +#define CDataOwn_Check(ob) (Py_TYPE(ob) == &CDataOwning_Type || \ + Py_TYPE(ob) == &CDataOwningGC_Type) typedef union { unsigned char m_char; @@ -561,13 +564,6 @@ PyObject_Del(cf); } -static int -cfield_traverse(CFieldObject *cf, visitproc visit, void *arg) -{ - Py_VISIT(cf->cf_type); - return 0; -} - #undef OFF #define OFF(x) offsetof(CFieldObject, x) @@ -602,7 +598,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ - (traverseproc)cfield_traverse, /* tp_traverse */ + 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ @@ -1384,26 +1380,17 @@ Py_DECREF(cd->c_type); #ifndef CFFI_MEM_LEAK /* never release anything, tests only */ - PyObject_Del(cd); + Py_TYPE(cd)->tp_free((PyObject *)cd); #endif } static void cdataowning_dealloc(CDataObject *cd) { + assert(!(cd->c_type->ct_flags & (CT_IS_VOID_PTR | CT_FUNCTIONPTR))); + if (cd->c_type->ct_flags & CT_IS_PTR_TO_OWNED) { Py_DECREF(((CDataObject_own_structptr *)cd)->structobj); } - else if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { - PyObject *x = (PyObject *)(cd->c_data + 42); - Py_DECREF(x); - } - else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { - /* a callback */ - ffi_closure *closure = (ffi_closure *)cd->c_data; - PyObject *args = (PyObject *)(closure->user_data); - Py_XDECREF(args); - cffi_closure_free(closure); - } #if defined(CFFI_MEM_DEBUG) || defined(CFFI_MEM_LEAK) if (cd->c_type->ct_flags & (CT_PRIMITIVE_ANY | CT_STRUCT | CT_UNION)) { assert(cd->c_type->ct_size >= 0); @@ -1420,28 +1407,55 @@ cdata_dealloc(cd); } -static int cdata_traverse(CDataObject *cd, visitproc visit, void *arg) +static void cdataowninggc_dealloc(CDataObject *cd) { - /* XXX needs Py_TPFLAGS_HAVE_GC */ - Py_VISIT(cd->c_type); - return 0; + assert(!(cd->c_type->ct_flags & (CT_IS_PTR_TO_OWNED | + CT_PRIMITIVE_ANY | + CT_STRUCT | CT_UNION))); + PyObject_GC_UnTrack(cd); + + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_DECREF(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ + ffi_closure *closure = (ffi_closure *)cd->c_data; + PyObject *args = (PyObject *)(closure->user_data); + Py_XDECREF(args); + cffi_closure_free(closure); + } + cdata_dealloc(cd); } -static int cdataowning_traverse(CDataObject 
*cd, visitproc visit, void *arg) +static int cdataowninggc_traverse(CDataObject *cd, visitproc visit, void *arg) { - if (cd->c_type->ct_flags & CT_IS_PTR_TO_OWNED) { - Py_VISIT(((CDataObject_own_structptr *)cd)->structobj); - } - else if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ PyObject *x = (PyObject *)(cd->c_data + 42); Py_VISIT(x); } - else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ ffi_closure *closure = (ffi_closure *)cd->c_data; PyObject *args = (PyObject *)(closure->user_data); Py_VISIT(args); } - return cdata_traverse(cd, visit, arg); + return 0; +} + +static int cdataowninggc_clear(CDataObject *cd) +{ + if (cd->c_type->ct_flags & CT_IS_VOID_PTR) { /* a handle */ + PyObject *x = (PyObject *)(cd->c_data + 42); + Py_INCREF(Py_None); + cd->c_data = ((char *)Py_None) - 42; + Py_DECREF(x); + } + else if (cd->c_type->ct_flags & CT_FUNCTIONPTR) { /* a callback */ + ffi_closure *closure = (ffi_closure *)cd->c_data; + PyObject *args = (PyObject *)(closure->user_data); + closure->user_data = NULL; + Py_XDECREF(args); + } + return 0; } static PyObject *cdata_float(CDataObject *cd); /*forward*/ @@ -2415,7 +2429,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags */ 0, /* tp_doc */ - (traverseproc)cdata_traverse, /* tp_traverse */ + 0, /* tp_traverse */ 0, /* tp_clear */ cdata_richcompare, /* tp_richcompare */ offsetof(CDataObject, c_weakreflist), /* tp_weaklistoffset */ @@ -2445,7 +2459,7 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags */ 0, /* tp_doc */ - (traverseproc)cdataowning_traverse, /* tp_traverse */ + 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ @@ -2457,6 +2471,41 @@ &CData_Type, /* tp_base */ }; +static PyTypeObject CDataOwningGC_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "_cffi_backend.CDataOwnGC", + sizeof(CDataObject), + 0, + (destructor)cdataowninggc_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES /* tp_flags */ + | Py_TPFLAGS_HAVE_GC, + 0, /* tp_doc */ + (traverseproc)cdataowninggc_traverse, /* tp_traverse */ + (inquiry)cdataowninggc_clear, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + &CDataOwning_Type, /* tp_base */ +}; + /************************************************************/ typedef struct { @@ -4364,7 +4413,7 @@ closure = cffi_closure_alloc(); - cd = PyObject_New(CDataObject, &CDataOwning_Type); + cd = PyObject_GC_New(CDataObject, &CDataOwningGC_Type); if (cd == NULL) goto error; Py_INCREF(ct); @@ -4799,12 +4848,14 @@ return NULL; } - cd = allocate_owning_object(sizeof(CDataObject), ct); + cd = (CDataObject *)PyObject_GC_New(CDataObject, &CDataOwningGC_Type); if (cd == NULL) return NULL; - + Py_INCREF(ct); + cd->c_type = ct; Py_INCREF(x); cd->c_data = ((char *)x) - 42; + cd->c_weakreflist = NULL; return (PyObject *)cd; } @@ -5278,6 +5329,8 @@ INITERROR; if (PyType_Ready(&CDataOwning_Type) < 0) INITERROR; + if (PyType_Ready(&CDataOwningGC_Type) < 0) + INITERROR; if 
(PyType_Ready(&CDataIter_Type) < 0) INITERROR; if (PyType_Ready(&MiniBuffer_Type) < 0) From noreply at buildbot.pypy.org Thu Jul 18 18:18:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 18:18:07 +0200 (CEST) Subject: [pypy-commit] cffi default: Found and fixed the bug. Message-ID: <20130718161807.9D0151C1536@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1280:f2c8f99da729 Date: 2013-07-18 18:15 +0200 http://bitbucket.org/cffi/cffi/changeset/f2c8f99da729/ Log: Found and fixed the bug. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4420,6 +4420,7 @@ cd->c_type = ct; cd->c_data = (char *)closure; cd->c_weakreflist = NULL; + PyObject_GC_Track(cd); cif_descr = (cif_description_t *)ct->ct_extra; if (cif_descr == NULL) { @@ -4856,6 +4857,7 @@ Py_INCREF(x); cd->c_data = ((char *)x) - 42; cd->c_weakreflist = NULL; + PyObject_GC_Track(cd); return (PyObject *)cd; } From noreply at buildbot.pypy.org Thu Jul 18 18:25:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 18:25:53 +0200 (CEST) Subject: [pypy-commit] cffi default: Add the (now-passing) test from issue #92 Message-ID: <20130718162553.602EF1C1536@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1281:ec0b6e951f55 Date: 2013-07-18 18:25 +0200 http://bitbucket.org/cffi/cffi/changeset/ec0b6e951f55/ Log: Add the (now-passing) test from issue #92 diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -381,3 +381,24 @@ sin100 = my_decorator(m.sin) x = sin100(1.23) assert x == math.sin(1.23) + 100 + + def test_free_callback_cycle(self): + import weakref + def make_callback(data): + container = [data] + callback = ffi.callback('int()', lambda: len(container)) + container.append(callback) + # Ref cycle: callback -> lambda (closure) -> container -> callback + return callback + + class Data(object): + pass + ffi = FFI(backend=self.Backend()) + data = Data() + callback = make_callback(data) + wr = weakref.ref(data) + del callback, data + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None # 'data' does not leak From noreply at buildbot.pypy.org Thu Jul 18 18:35:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 18:35:01 +0200 (CEST) Subject: [pypy-commit] pypy default: int_rshift needs to be there too, because 'n >> m' at app-level checks for more Message-ID: <20130718163501.0CAF51C1545@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65460:c71578a1ec52 Date: 2013-07-18 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/c71578a1ec52/ Log: int_rshift needs to be there too, because 'n >> m' at app-level checks for more diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -45,6 +45,7 @@ 'int_floordiv': 'interp_intop.int_floordiv', 'int_mod': 'interp_intop.int_mod', 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', 'uint_rshift': 'interp_intop.uint_rshift', } diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py --- a/pypy/module/__pypy__/interp_intop.py +++ b/pypy/module/__pypy__/interp_intop.py @@ -29,6 +29,10 @@ return space.wrap(llop.int_lshift(lltype.Signed, n, m)) @unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at 
unwrap_spec(n=int, m=int) def uint_rshift(space, n, m): n = r_uint(n) x = llop.uint_rshift(lltype.Unsigned, n, m) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -79,6 +79,14 @@ assert intop.int_lshift(-sys.maxint // 3, 2) == ( self.intmask((-sys.maxint // 3) << 2)) + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + def test_uint_rshift(self): import sys from __pypy__ import intop From noreply at buildbot.pypy.org Thu Jul 18 18:47:59 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 18:47:59 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: typo and mention schengen Message-ID: <20130718164759.D10D51C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4991:f13a9f0ae2ed Date: 2013-07-18 18:47 +0200 http://bitbucket.org/pypy/extradoc/changeset/f13a9f0ae2ed/ Log: typo and mention schengen diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -17,7 +17,7 @@ * STM and STM-related topics -* whatever attendands find interesting :-) +* whatever attendants find interesting :-) ----------- Exact times @@ -63,3 +63,5 @@ http://mail.python.org/mailman/listinfo/pypy-dev Remember that you may need a UK-to-(insert country here) power adapter. +Please also note that UK is not within the Schengen zone, so you need +to bring your passport if coming from any other country. From noreply at buildbot.pypy.org Thu Jul 18 19:03:29 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 19:03:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: try to rework the newcomer section Message-ID: <20130718170329.E9E2E1C13F9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r4992:a105188a08bf Date: 2013-07-18 19:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/a105188a08bf/ Log: try to rework the newcomer section diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -3,22 +3,29 @@ ===================================================================== The next PyPy sprint will be in London, United Kingdom for the first -time. This is a fully public sprint: newcomers and topics other than -those proposed below are welcome. +time. This is a fully public sprint. PyPy sprints are a very good way +to get into PyPy development and no prior PyPy knowledge is necessary. 
------------------------------ Goals and topics of the sprint ------------------------------ -* porting applications or libraries to run on PyPy +For newcomers: + +* bring your application/library and we'll help you port it to PyPy, + benchmark and profile + +* come and write your favorite missing numpy function + +* help us work on developer tools like jitviewer + +We'll also work on: * refactoring the JIT optimizations * STM and STM-related topics -* whatever attendants find interesting :-) - ----------- Exact times ----------- From noreply at buildbot.pypy.org Thu Jul 18 19:09:29 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 18 Jul 2013 19:09:29 +0200 (CEST) Subject: [pypy-commit] pypy python-loop-unroll: Mergd upstream Message-ID: <20130718170929.E3BEA1C13F9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: python-loop-unroll Changeset: r65461:75ab68250185 Date: 2013-07-18 09:48 -0700 http://bitbucket.org/pypy/pypy/changeset/75ab68250185/ Log: Mergd upstream diff too long, truncating to 2000 out of 22444 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,6 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen 
Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -218,32 +281,33 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. 
+ so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,19 +57,19 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -80,7 +79,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars @@ -128,13 +127,34 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is 
None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,6 +134,11 @@ DEBUG = 10 NOTSET = 0 +# NOTE(flaper87): This is different from +# python's stdlib module since pypy's +# dicts are much faster when their +# keys are all of the same type. +# Introduced in commit 9de7b40c586f _levelToName = { CRITICAL: 'CRITICAL', ERROR: 'ERROR', @@ -151,6 +156,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -166,7 +173,11 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelToName.get(level, ("Level %s" % level)) + + # NOTE(flaper87): Check also in _nameToLevel + # if value is None. + return (_levelToName.get(level) or + _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,16 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - try: - self._sock._decref_socketios() - except AttributeError: - pass # bah, someone built a _fileobject manually - # with some unexpected replacement of the - # _socketobject class + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -278,6 +278,24 @@ def test_invalid_name(self): self.assertRaises(TypeError, logging.getLogger, any) + def test_get_level_name(self): + """Test getLevelName returns level constant.""" + # NOTE(flaper87): Bug #1517 + self.assertEqual(logging.getLevelName('NOTSET'), 0) + self.assertEqual(logging.getLevelName('DEBUG'), 10) + self.assertEqual(logging.getLevelName('INFO'), 20) + self.assertEqual(logging.getLevelName('WARN'), 30) + self.assertEqual(logging.getLevelName('WARNING'), 30) + self.assertEqual(logging.getLevelName('ERROR'), 40) + self.assertEqual(logging.getLevelName('CRITICAL'), 50) + + self.assertEqual(logging.getLevelName(0), 'NOTSET') + self.assertEqual(logging.getLevelName(10), 'DEBUG') + self.assertEqual(logging.getLevelName(20), 'INFO') + self.assertEqual(logging.getLevelName(30), 'WARNING') + self.assertEqual(logging.getLevelName(40), 'ERROR') + self.assertEqual(logging.getLevelName(50), 'CRITICAL') + class BasicFilterTest(BaseTest): """Test the bundled Filter class.""" diff --git 
a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -20,7 +20,7 @@ or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name - # in the interp levle thrown TypeError + # in the interp level thrown TypeError # from rawffi if there are more raise TypeError('bit fields not allowed for type ' + tp.__name__) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ 
-589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. 
""" thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,14 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -try: - import cpyext -except ImportError: - raise ImportError("No module named '_testcapi'") -else: - compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,62 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = 
imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,48 @@ +# _tkinter package -- low-level interface to libtk and libtcl. +# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. + +class TclError(Exception): + pass + +import cffi +try: + from .tklib import tklib, tkffi +except cffi.VerificationError: + raise ImportError("Tk headers and development libraries are required") + +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . 
import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): 
+ # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. + raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != 
tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. + raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py @@ -0,0 +1,114 @@ +# TclObject, conversions with Python objects + +from .tklib import tklib, tkffi + +class TypeCache(object): + def __init__(self): + self.BooleanType = tklib.Tcl_GetObjType("boolean") + self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") + self.DoubleType = tklib.Tcl_GetObjType("double") + self.IntType = tklib.Tcl_GetObjType("int") + self.ListType = tklib.Tcl_GetObjType("list") + self.ProcBodyType = tklib.Tcl_GetObjType("procbody") + self.StringType = tklib.Tcl_GetObjType("string") + + +def FromObj(app, value): + """Convert a TclObj pointer into a Python object.""" + typeCache = app._typeCache + if not value.typePtr: + buf = tkffi.buffer(value.bytes, value.length) + result = buf[:] + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. 
+ try: + result.decode('ascii') + except UnicodeDecodeError: + result = result.decode('utf8') + return result + + elif value.typePtr == typeCache.BooleanType: + return result + elif value.typePtr == typeCache.ByteArrayType: + return result + elif value.typePtr == typeCache.DoubleType: + return value.internalRep.doubleValue + elif value.typePtr == typeCache.IntType: + return value.internalRep.longValue + elif value.typePtr == typeCache.ListType: + size = tkffi.new('int*') + status = tklib.Tcl_ListObjLength(app.interp, value, size) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result = [] + tcl_elem = tkffi.new("Tcl_Obj**") + for i in range(size[0]): + status = tklib.Tcl_ListObjIndex(app.interp, + value, i, tcl_elem) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result.append(FromObj(app, tcl_elem[0])) + return tuple(result) + elif value.typePtr == typeCache.ProcBodyType: + return result + elif value.typePtr == typeCache.StringType: + buf = tklib.Tcl_GetUnicode(value) + length = tklib.Tcl_GetCharLength(value) + buf = tkffi.buffer(tkffi.cast("char*", buf), length*2)[:] + return buf.decode('utf-16') + + return TclObject(value) + +def AsObj(value): + if isinstance(value, str): + return tklib.Tcl_NewStringObj(value, len(value)) + elif isinstance(value, bool): + return tklib.Tcl_NewBooleanObj(value) + elif isinstance(value, int): + return tklib.Tcl_NewLongObj(value) + elif isinstance(value, float): + return tklib.Tcl_NewDoubleObj(value) + elif isinstance(value, tuple): + argv = tkffi.new("Tcl_Obj*[]", len(value)) + for i in range(len(value)): + argv[i] = AsObj(value[i]) + return tklib.Tcl_NewListObj(len(value), argv) + elif isinstance(value, unicode): + encoded = value.encode('utf-16')[2:] + buf = tkffi.new("char[]", encoded) + inbuf = tkffi.cast("Tcl_UniChar*", buf) + return tklib.Tcl_NewUnicodeObj(buf, len(encoded)/2) + elif isinstance(value, TclObject): + tklib.Tcl_IncrRefCount(value._value) + return value._value + else: + return AsObj(str(value)) + +class TclObject(object): + def __new__(cls, value): + self = object.__new__(cls) + tklib.Tcl_IncrRefCount(value) + self._value = value + self._string = None + return self + + def __del__(self): + tklib.Tcl_DecrRefCount(self._value) + + def __str__(self): + if self._string and isinstance(self._string, str): + return self._string + return tkffi.string(tklib.Tcl_GetString(self._value)) + + @property + def string(self): + if self._string is None: + length = tkffi.new("int*") + s = tklib.Tcl_GetStringFromObj(self._value, length) + value = tkffi.buffer(s, length[0])[:] + try: + value.decode('ascii') + except UnicodeDecodeError: + value = value.decode('utf8') + self._string = value + return self._string diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tklib.py @@ -0,0 +1,114 @@ +# C bindings with libtcl and libtk. + +from cffi import FFI + +tkffi = FFI() + +tkffi.cdef(""" +char *get_tk_version(); +char *get_tcl_version(); +#define TCL_READABLE ... +#define TCL_WRITABLE ... +#define TCL_EXCEPTION ... +#define TCL_ERROR ... +#define TCL_OK ... + +#define TCL_LEAVE_ERR_MSG ... +#define TCL_GLOBAL_ONLY ... +#define TCL_EVAL_DIRECT ... +#define TCL_EVAL_GLOBAL ... + +typedef unsigned short Tcl_UniChar; +typedef ... 
Tcl_Interp; +typedef ...* Tcl_ThreadId; +typedef ...* Tcl_Command; + +typedef struct Tcl_ObjType { + char *name; + ...; +} Tcl_ObjType; +typedef struct Tcl_Obj { + char *bytes; + int length; + Tcl_ObjType *typePtr; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; + } internalRep; + ...; +} Tcl_Obj; + +Tcl_Interp *Tcl_CreateInterp(); +void Tcl_DeleteInterp(Tcl_Interp* interp); +int Tcl_Init(Tcl_Interp* interp); +int Tk_Init(Tcl_Interp* interp); + +void Tcl_Free(char* ptr); + +const char *Tcl_SetVar(Tcl_Interp* interp, const char* varName, const char* newValue, int flags); +const char *Tcl_SetVar2(Tcl_Interp* interp, const char* name1, const char* name2, const char* newValue, int flags); +const char *Tcl_GetVar(Tcl_Interp* interp, const char* varName, int flags); +Tcl_Obj *Tcl_SetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, Tcl_Obj* newValuePtr, int flags); +Tcl_Obj *Tcl_GetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +int Tcl_UnsetVar2(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +const Tcl_ObjType *Tcl_GetObjType(const char* typeName); + +Tcl_Obj *Tcl_NewStringObj(const char* bytes, int length); +Tcl_Obj *Tcl_NewUnicodeObj(const Tcl_UniChar* unicode, int numChars); +Tcl_Obj *Tcl_NewLongObj(long longValue); +Tcl_Obj *Tcl_NewBooleanObj(int boolValue); +Tcl_Obj *Tcl_NewDoubleObj(double doubleValue); + +void Tcl_IncrRefCount(Tcl_Obj* objPtr); +void Tcl_DecrRefCount(Tcl_Obj* objPtr); + +int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); +char *Tcl_GetString(Tcl_Obj* objPtr); +char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); + +Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); +int Tcl_GetCharLength(Tcl_Obj* objPtr); + +Tcl_Obj *Tcl_NewListObj(int objc, Tcl_Obj* const objv[]); +int Tcl_ListObjLength(Tcl_Interp* interp, Tcl_Obj* listPtr, int* intPtr); +int Tcl_ListObjIndex(Tcl_Interp* interp, Tcl_Obj* listPtr, int index, Tcl_Obj** objPtrPtr); +int Tcl_SplitList(Tcl_Interp* interp, char* list, int* argcPtr, const char*** argvPtr); + +int Tcl_Eval(Tcl_Interp* interp, const char* script); +int Tcl_EvalFile(Tcl_Interp* interp, const char* filename); +int Tcl_EvalObjv(Tcl_Interp* interp, int objc, Tcl_Obj** objv, int flags); +Tcl_Obj *Tcl_GetObjResult(Tcl_Interp* interp); +const char *Tcl_GetStringResult(Tcl_Interp* interp); +void Tcl_SetObjResult(Tcl_Interp* interp, Tcl_Obj* objPtr); + +typedef void* ClientData; +typedef int Tcl_CmdProc( + ClientData clientData, + Tcl_Interp *interp, + int argc, + const char *argv[]); +typedef void Tcl_CmdDeleteProc( + ClientData clientData); +Tcl_Command Tcl_CreateCommand(Tcl_Interp* interp, const char* cmdName, Tcl_CmdProc proc, ClientData clientData, Tcl_CmdDeleteProc deleteProc); +int Tcl_DeleteCommand(Tcl_Interp* interp, const char* cmdName); + +Tcl_ThreadId Tcl_GetCurrentThread(); +int Tcl_DoOneEvent(int flags); + +int Tk_GetNumMainWindows(); +""") + +tklib = tkffi.verify(""" +#include +#include + +char *get_tk_version() { return TK_VERSION; } +char *get_tcl_version() { return TCL_VERSION; } +""", +include_dirs=['/usr/include/tcl'], +libraries=['tcl', 'tk'], +) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' 
not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -707,7 +707,7 @@ class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union - _reftypename = '%s %s &' % (kind, name) + _reftypename = '%s &' % (name,) _kind = kind # CTypesStructOrUnion._fix_class() @@ -934,7 +934,7 @@ # class CTypesEnum(CTypesInt): __slots__ = [] - _reftypename = 'enum %s &' % name + _reftypename = '%s &' % name def _get_own_repr(self): value = self._value diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -244,6 +244,10 @@ self.forcename = forcename self.build_c_name_with_marker() + def get_official_name(self): + assert self.c_name_with_marker.endswith('&') + return self.c_name_with_marker[:-1] + class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None @@ -357,7 +361,9 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) - return global_cache(self, ffi, 'new_struct_type', self.name, key=self) + + return global_cache(self, ffi, 'new_struct_type', + self.get_official_name(), key=self) class UnionType(StructOrUnion): @@ -365,7 +371,8 @@ def build_backend_type(self, ffi, finishlist): finishlist.append(self) - return global_cache(self, ffi, 'new_union_type', self.name, key=self) + return global_cache(self, ffi, 'new_union_type', + self.get_official_name(), key=self) class EnumType(StructOrUnionOrEnum): @@ -388,7 +395,8 @@ def build_backend_type(self, ffi, finishlist): self.check_not_partial() base_btype = self.build_baseinttype(ffi, finishlist) - return global_cache(self, ffi, 'new_enum_type', self.name, + return global_cache(self, ffi, 'new_enum_type', + self.get_official_name(), self.enumerators, self.enumvalues, base_btype, key=self) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -156,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -701,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -74,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -168,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -390,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -437,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -450,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) From noreply at buildbot.pypy.org Thu Jul 18 19:09:31 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 18 Jul 2013 19:09:31 +0200 (CEST) Subject: [pypy-commit] pypy python-loop-unroll: Remove the heuristic for now. Message-ID: <20130718170931.28A801C13F9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: python-loop-unroll Changeset: r65462:3c56ab0bd397 Date: 2013-07-18 09:50 -0700 http://bitbucket.org/pypy/pypy/changeset/3c56ab0bd397/ Log: Remove the heuristic for now. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -884,8 +884,7 @@ def GET_ITER(self, oparg, next_instr): from pypy.module.__pypy__.interp_unroll import W_LoopUnroller w_iterable = self.popvalue() - length_hint = self.space.length_hint(w_iterable, sys.maxint) - if (jit.isconstant(length_hint) and length_hint < 20) or isinstance(w_iterable, W_LoopUnroller): + if isinstance(w_iterable, W_LoopUnroller): lastblock = self.lastblock # This is the case for comprehensions, which don't add a frame # block, annoying (for now ignore the problem). 
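With the length_hint heuristic removed above, GET_ITER only takes the unrolling path when the iterable is wrapped explicitly at application level (it must be a W_LoopUnroller). A minimal sketch of that usage follows — assuming the unroll_loop helper which this branch exposes in the __pypy__ module, the same pattern the test added in the commits below uses; it runs only on the python-loop-unroll branch:

    from __pypy__ import unroll_loop

    class A(object):
        def __init__(self):
            self.a = 0
            self.b = 0

    a = A()
    # Wrapping the list marks the iterable, so GET_ITER sees a
    # W_LoopUnroller and unrolls the loop body once per element,
    # instead of relying on the removed length_hint-based guess.
    for attr in unroll_loop(["a", "b"]):
        setattr(a, attr, getattr(a, attr) + 1)
    assert a.a == 1 and a.b == 1

Making the behaviour opt-in keeps ordinary short loops predictable: nothing is unrolled unless the code asks for it.
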
From noreply at buildbot.pypy.org Thu Jul 18 19:09:32 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 18 Jul 2013 19:09:32 +0200 (CEST) Subject: [pypy-commit] pypy python-loop-unroll: Attempt to write a test Message-ID: <20130718170932.408491C13F9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: python-loop-unroll Changeset: r65463:eba8aacbc617 Date: 2013-07-18 10:08 -0700 http://bitbucket.org/pypy/pypy/changeset/eba8aacbc617/ Log: Attempt to write a test diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -408,3 +408,30 @@ log = self.run(main, [300]) loop, = log.loops_by_id("long_op") assert len(loop.ops_by_id("long_op")) == 0 + + def test_explicit_loop_unrolling(self): + def main(n): + class A(object): + def __init__(self): + self.a = 0 + self.b = 0 + + i = 0 + while i < n: + a = A() + for attr in ["a", "b"]: + setattr(a, attr, getattr(a, attr) + 1) + i += a.b + + log = self.run(main, [300]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + label(..., descr=TargetToken(4370600008)) + i48 = int_lt(i43, i30) + guard_true(i48, descr=...) + guard_not_invalidated(descr=...) + p49 = force_token() + i50 = int_add(i43, 1) + --TICK-- + jump(..., descr=TargetToken(4370600008)) + """) From noreply at buildbot.pypy.org Thu Jul 18 19:44:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 19:44:03 +0200 (CEST) Subject: [pypy-commit] cffi default: Check in win64.obj, thanks Matti :-) Message-ID: <20130718174403.7D6941C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1282:4690f6191fc5 Date: 2013-07-18 19:43 +0200 http://bitbucket.org/cffi/cffi/changeset/4690f6191fc5/ Log: Check in win64.obj, thanks Matti :-) diff --git a/c/libffi_msvc/win64.obj b/c/libffi_msvc/win64.obj new file mode 100644 index 0000000000000000000000000000000000000000..38d3cd166b0ecad62ea4d9aab86cc574de0c9fe7 GIT binary patch [cut] diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,12 +63,13 @@ libraries[:] = [] _filenames = [filename.lower() for filename in os.listdir(COMPILE_LIBFFI)] _filenames = [filename for filename in _filenames - if filename.endswith('.c') or - filename.endswith('.asm')] - if sys.maxsize <= 2**32: - _filenames.remove('win64.asm') - else: + if filename.endswith('.c')] + if sys.maxsize > 2**32: + # 64-bit: unlist win32.c, and add instead win64.obj. If the obj + # happens to get outdated at some point in the future, you need to + # rebuild it manually from win64.asm. 
_filenames.remove('win32.c') + extra_link_args.append(os.path.join(COMPILE_LIBFFI, 'win64.obj')) sources.extend(os.path.join(COMPILE_LIBFFI, filename) for filename in _filenames) define_macros.append(('USE_C_LIBFFI_MSVC', '1')) From noreply at buildbot.pypy.org Thu Jul 18 19:51:29 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 18 Jul 2013 19:51:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: a note about the Euro and put a line back in Message-ID: <20130718175129.DA6821C02BA@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4993:4795f97b5d15 Date: 2013-07-18 19:51 +0200 http://bitbucket.org/pypy/extradoc/changeset/4795f97b5d15/ Log: a note about the Euro and put a line back in diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -26,6 +26,8 @@ * STM and STM-related topics +* anything else attendants are interested in + ----------- Exact times ----------- @@ -71,4 +73,5 @@ Remember that you may need a UK-to-(insert country here) power adapter. Please also note that UK is not within the Schengen zone, so you need -to bring your passport if coming from any other country. +to bring your passport if coming from any other country. Also, the UK does not +have the Euro. From noreply at buildbot.pypy.org Thu Jul 18 19:53:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 19:53:32 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: ReSTify Message-ID: <20130718175332.EAEDE1C13F9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4994:fd2e372d7b7f Date: 2013-07-18 19:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/fd2e372d7b7f/ Log: ReSTify diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -67,7 +67,7 @@ https://bitbucket.org/pypy/extradoc/ https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/london-2013 -or on the pypy-dev mailing list if you do not yet have check-in rights: +or on the pypy-dev mailing list if you do not yet have check-in rights:: http://mail.python.org/mailman/listinfo/pypy-dev From noreply at buildbot.pypy.org Thu Jul 18 19:53:53 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 18 Jul 2013 19:53:53 +0200 (CEST) Subject: [pypy-commit] pypy python-loop-unroll: the part of this test where it does the thing Message-ID: <20130718175353.C62751C13F9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: python-loop-unroll Changeset: r65464:0bc2dc5a3bf7 Date: 2013-07-18 10:53 -0700 http://bitbucket.org/pypy/pypy/changeset/0bc2dc5a3bf7/ Log: the part of this test where it does the thing diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -411,6 +411,8 @@ def test_explicit_loop_unrolling(self): def main(n): + from __pypy__ import unroll_loop + class A(object): def __init__(self): self.a = 0 @@ -419,7 +421,7 @@ i = 0 while i < n: a = A() - for attr in ["a", "b"]: + for attr in unroll_loop(["a", "b"]): setattr(a, attr, getattr(a, attr) + 1) i += a.b From noreply at buildbot.pypy.org Thu Jul 18 20:05:16 2013 From: noreply at buildbot.pypy.org (matti) Date: Thu, 18 Jul 2013 20:05:16 +0200 (CEST) Subject: [pypy-commit] cffi 
windows: skip tests in windows that crash the interpreter (64 bit) Message-ID: <20130718180516.70C7B1C13F9@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1283:97ac0788b770 Date: 2013-07-18 21:04 +0300 http://bitbucket.org/cffi/cffi/changeset/97ac0788b770/ Log: skip tests in windows that crash the interpreter (64 bit) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -977,6 +977,9 @@ py.test.raises(TypeError, f, x[0]) def test_call_function_21(): + import os + if os.name == 'nt': + py.test.skip('crashes the interpreter') BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a', BInt, -1), @@ -996,6 +999,9 @@ assert res == sum(lst) def test_call_function_22(): + import os + if os.name == 'nt': + py.test.skip('crashes the interpreter') BInt = new_primitive_type("int") BArray10 = new_array_type(new_pointer_type(BInt), 10) BStruct = new_struct_type("struct foo") @@ -1231,6 +1237,9 @@ assert f(-142) == -142 + i def test_callback_returning_struct(): + import os + if os.name == 'nt': + py.test.skip('crashes the interpreter') BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") BDouble = new_primitive_type("double") @@ -1250,6 +1259,9 @@ assert s.b == 1E-42 def test_callback_returning_big_struct(): + import os + if os.name == 'nt': + py.test.skip('crashes the interpreter') BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") BStructPtr = new_pointer_type(BStruct) From noreply at buildbot.pypy.org Thu Jul 18 20:17:55 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 20:17:55 +0200 (CEST) Subject: [pypy-commit] pypy jit-threshold-hooks: kill a feature that I'll implement differently Message-ID: <20130718181755.071921C13F9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: jit-threshold-hooks Changeset: r65465:4667c17cd952 Date: 2013-07-18 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/4667c17cd952/ Log: kill a feature that I'll implement differently diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -19,7 +19,6 @@ 'Box': 'interp_resop.WrappedBox', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', 'set_local_threshold': 'interp_jit.set_local_threshold', - 'set_local_bridge_threshold': 'interp_jit.set_local_bridge_threshold', } def setup_after_space_initialization(self): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -54,15 +54,12 @@ greens = ['next_instr', 'is_being_profiled', 'pycode'] virtualizables = ['frame'] -def start_bridge_threshold(next_instr, is_being_profiled, bytecode): - return bytecode.bridge_init_threshold pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, get_jitcell_at = get_jitcell_at, set_jitcell_at = set_jitcell_at, should_unroll_one_iteration = should_unroll_one_iteration, - start_bridge_threshold=start_bridge_threshold, name='pypyjit') class __extend__(PyFrame): @@ -181,7 +178,3 @@ ref = w_code.jit_cells[pos << 1] jitcell = cast_base_ptr_to_instance(BaseJitCell, ref) jitcell.counter = value - - at unwrap_spec(w_code=PyCode, value=int) -def set_local_bridge_threshold(space, w_code, value): - w_code.bridge_init_threshold = value diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ 
b/rpython/rlib/jit.py @@ -481,7 +481,6 @@ get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, - start_bridge_threshold=None, name='jitdriver', check_untranslated=True): if greens is not None: self.greens = greens @@ -518,7 +517,6 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated - self.start_bridge_threshold = start_bridge_threshold def _freeze_(self): return True From noreply at buildbot.pypy.org Thu Jul 18 20:22:02 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 18 Jul 2013 20:22:02 +0200 (CEST) Subject: [pypy-commit] pypy python-loop-unroll: fix up test Message-ID: <20130718182202.27A591C13F9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: python-loop-unroll Changeset: r65466:891944cf894f Date: 2013-07-18 11:21 -0700 http://bitbucket.org/pypy/pypy/changeset/891944cf894f/ Log: fix up test diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -428,12 +428,13 @@ log = self.run(main, [300]) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - label(..., descr=TargetToken(4370600008)) i48 = int_lt(i43, i30) guard_true(i48, descr=...) guard_not_invalidated(descr=...) p49 = force_token() + --TICK-- + --TICK-- i50 = int_add(i43, 1) --TICK-- - jump(..., descr=TargetToken(4370600008)) + jump(..., descr=...) """) From noreply at buildbot.pypy.org Thu Jul 18 20:57:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 20:57:30 +0200 (CEST) Subject: [pypy-commit] cffi default: Attempt to get rid of USE_C_LIBFFI_MSVC by really fixing libffi_msvc. Message-ID: <20130718185730.5EEFC1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1284:541a90941078 Date: 2013-07-18 20:57 +0200 http://bitbucket.org/cffi/cffi/changeset/541a90941078/ Log: Attempt to get rid of USE_C_LIBFFI_MSVC by really fixing libffi_msvc. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3885,19 +3885,6 @@ } assert(cf == NULL); -#ifdef USE_C_LIBFFI_MSVC - /* MSVC returns small structures in registers. Pretend int32 or - int64 return type. This is needed as a workaround for what - is really a bug of libffi_msvc seen as an independent library - (ctypes has a similar workaround). */ - if (is_result_type) { - if (ct->ct_size <= 4) - return &ffi_type_sint32; - if (ct->ct_size <= 8) - return &ffi_type_sint64; - } -#endif - /* next, allocate and fill the flattened list */ elements = fb_alloc(fb, (nflat + 1) * sizeof(ffi_type*)); nflat = 0; diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -46,7 +46,7 @@ register ffi_type **p_arg; argp = stack; - if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + if (ecif->cif->flags == FFI_TYPE_STRUCT) { *(void **) argp = ecif->rvalue; argp += sizeof(void *); @@ -124,7 +124,6 @@ switch (cif->rtype->type) { case FFI_TYPE_VOID: - case FFI_TYPE_STRUCT: case FFI_TYPE_SINT64: case FFI_TYPE_FLOAT: case FFI_TYPE_DOUBLE: @@ -132,6 +131,18 @@ cif->flags = (unsigned) cif->rtype->type; break; + case FFI_TYPE_STRUCT: + /* MSVC returns small structures in registers. 
Put in cif->flags + the value FFI_TYPE_STRUCT only if the structure is big enough; + otherwise, put the 4- or 8-bytes integer type. */ + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_SINT64; + else + cif->flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_UINT64: #ifdef _WIN64 case FFI_TYPE_POINTER: @@ -180,7 +191,7 @@ /* value address then we need to make one */ if ((rvalue == NULL) && - (cif->rtype->type == FFI_TYPE_STRUCT)) + (cif->flags == FFI_TYPE_STRUCT)) { /*@-sysunrecog@*/ ecif.rvalue = alloca(cif->rtype->size); @@ -338,7 +349,7 @@ argp = stack; - if ( cif->rtype->type == FFI_TYPE_STRUCT ) { + if ( cif->flags == FFI_TYPE_STRUCT ) { *rvalue = *(void **) argp; argp += 4; } diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -116,9 +116,9 @@ #if !defined M68K && !defined __x86_64__ && !defined S390 /* Make space for the return structure pointer */ if (cif->rtype->type == FFI_TYPE_STRUCT - /* MSVC returns small structures in registers. But we have a different - workaround: pretend int32 or int64 return type, and converting to - structure afterwards. */ +#ifdef _WIN32 + && (cif->rtype->size > 8) /* MSVC returns small structs in registers */ +#endif #ifdef SPARC && (cif->abi != FFI_V9 || cif->rtype->size > 32) #endif diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -72,7 +72,6 @@ extra_link_args.append(os.path.join(COMPILE_LIBFFI, 'win64.obj')) sources.extend(os.path.join(COMPILE_LIBFFI, filename) for filename in _filenames) - define_macros.append(('USE_C_LIBFFI_MSVC', '1')) else: use_pkg_config() From noreply at buildbot.pypy.org Thu Jul 18 21:32:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 21:32:34 +0200 (CEST) Subject: [pypy-commit] cffi default: Attempt a fix for Win64 Message-ID: <20130718193234.765D51C1536@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1285:40049c1f292a Date: 2013-07-18 21:32 +0200 http://bitbucket.org/cffi/cffi/changeset/40049c1f292a/ Log: Attempt a fix for Win64 diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -102,6 +102,15 @@ FFI_ASSERT(0); } } +#ifdef _WIN64 + else if (z > 8) + { + /* On Win64, if a single argument takes more than 8 bytes, + then it is always passed by reference. */ + *(void **)argp = *p_argv; + z = 8; + } +#endif else { memcpy(argp, *p_argv, z); From noreply at buildbot.pypy.org Thu Jul 18 21:44:51 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 21:44:51 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: hack differently - store extra descrs on effectinfo (I think it's ok Message-ID: <20130718194451.03EEA1C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65467:4a928febab82 Date: 2013-07-18 21:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4a928febab82/ Log: hack differently - store extra descrs on effectinfo (I think it's ok to store extra field on this object, we should not have too many of them) diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -200,7 +200,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescrs=None): """Return the calldescr that describes all calls done by 'op'. 
This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -258,7 +258,8 @@ # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, - oopspecindex, can_invalidate, call_release_gil_target) + oopspecindex, can_invalidate, call_release_gil_target, + extradescrs) # assert effectinfo is not None if elidable or loopinvariant: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -100,11 +100,13 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extra_descrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), + frozenset_or_none(extra_descrs), extraeffect, oopspecindex, can_invalidate) @@ -137,6 +139,7 @@ result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex result.call_release_gil_target = call_release_gil_target + result.extra_descrs = extra_descrs if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result @@ -176,7 +179,8 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extra_descrs=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None @@ -225,7 +229,8 @@ extraeffect, oopspecindex, can_invalidate, - call_release_gil_target) + call_release_gil_target, + extra_descrs) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -360,11 +360,12 @@ lst.append(v) def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False, - oopspecindex=EffectInfo.OS_NONE): + oopspecindex=EffectInfo.OS_NONE, extradescrs=None): """A direct_call turns into the operation 'residual_call_xxx' if it is calling a function that we don't want to JIT. The initial args of 'residual_call_xxx' are the function to call, and its calldescr.""" - calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex, + extradescrs=extradescrs) op1 = self.rewrite_call(op, 'residual_call', [op.args[0]] + extraargs, calldescr=calldescr) if may_call_jitcodes or self.callcontrol.calldescr_canraise(calldescr): @@ -1619,12 +1620,11 @@ def do_resizable_list__resize_ge(self, op, args, *descrs): index = EffectInfo.OS_LIST_RESIZE_GE - oplist = self.handle_residual_call(op, oopspecindex=index)[0] LIST = args[0].concretetype.TO lengthdescr = self.cpu.fielddescrof(LIST, 'length') arraydescr = self.cpu.arraydescrof(LIST.items.TO) - oplist[0].args += [lengthdescr, arraydescr] - return oplist + return self.handle_residual_call(op, oopspecindex=index, + extradescrs=[lengthdescr, arraydescr]) # ---------- # Strings and Unicodes. 
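The hunks above only attach the extra descrs to the call; they are consumed later, in the backend rewrite pass (see the list_resize_ge changesets further down in this archive). As a rough, hedged sketch of that flow, with class names and the constant below invented purely for illustration (they are not the real RPython classes or values), the descrs simply ride along on the EffectInfo that the calldescr carries:

    # Illustration only: how descrs attached to an EffectInfo travel from the
    # codewriter (jtransform) to whatever later inspects the CALL's calldescr.
    class FakeEffectInfo(object):
        def __init__(self, oopspecindex, extra_descrs):
            self.oopspecindex = oopspecindex
            self.extra_descrs = extra_descrs      # e.g. [lengthdescr, arraydescr]

    class FakeCallDescr(object):
        def __init__(self, effectinfo):
            self._effectinfo = effectinfo
        def get_extra_info(self):
            return self._effectinfo

    OS_LIST_RESIZE_GE = 42                        # placeholder value, not the real one

    # codewriter side: build the calldescr once, with the descrs attached
    calldescr = FakeCallDescr(FakeEffectInfo(OS_LIST_RESIZE_GE,
                                             ["lengthdescr", "arraydescr"]))

    # backend side: recover them from the operation's descr
    ei = calldescr.get_extra_info()
    if ei is not None and ei.oopspecindex == OS_LIST_RESIZE_GE:
        lengthdescr, arraydescr = ei.extra_descrs
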
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1204,7 +1204,7 @@ CIF_DESCRIPTION_P) kind, descr, itemsize = get_arg_descr(self.metainterp.cpu, cif_description.rtype) - + if kind != 'v': ofs = cif_description.exchange_result assert ofs % itemsize == 0 # alignment check (result) From noreply at buildbot.pypy.org Thu Jul 18 21:46:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 21:46:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: A EU photo ID seems enough for the UK, but it is best to check. Message-ID: <20130718194609.29DA51C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r4995:22883bb1f876 Date: 2013-07-18 21:45 +0200 http://bitbucket.org/pypy/extradoc/changeset/22883bb1f876/ Log: A EU photo ID seems enough for the UK, but it is best to check. diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -72,6 +72,6 @@ http://mail.python.org/mailman/listinfo/pypy-dev Remember that you may need a UK-to-(insert country here) power adapter. -Please also note that UK is not within the Schengen zone, so you need -to bring your passport if coming from any other country. Also, the UK does not -have the Euro. +Please also note that UK is not within the Schengen zone; EU citizens +can usually enter without a passport (a photo ID is enough) but it is +best to check. Also, the UK does not have the Euro. From noreply at buildbot.pypy.org Thu Jul 18 21:48:30 2013 From: noreply at buildbot.pypy.org (matti) Date: Thu, 18 Jul 2013 21:48:30 +0200 (CEST) Subject: [pypy-commit] cffi windows: merge default into branch Message-ID: <20130718194830.B297B1C02BA@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1286:887dae2c707c Date: 2013-07-18 22:43 +0300 http://bitbucket.org/cffi/cffi/changeset/887dae2c707c/ Log: merge default into branch diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3885,19 +3885,6 @@ } assert(cf == NULL); -#ifdef USE_C_LIBFFI_MSVC - /* MSVC returns small structures in registers. Pretend int32 or - int64 return type. This is needed as a workaround for what - is really a bug of libffi_msvc seen as an independent library - (ctypes has a similar workaround). */ - if (is_result_type) { - if (ct->ct_size <= 4) - return &ffi_type_sint32; - if (ct->ct_size <= 8) - return &ffi_type_sint64; - } -#endif - /* next, allocate and fill the flattened list */ elements = fb_alloc(fb, (nflat + 1) * sizeof(ffi_type*)); nflat = 0; diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -46,7 +46,7 @@ register ffi_type **p_arg; argp = stack; - if (ecif->cif->rtype->type == FFI_TYPE_STRUCT) + if (ecif->cif->flags == FFI_TYPE_STRUCT) { *(void **) argp = ecif->rvalue; argp += sizeof(void *); @@ -102,6 +102,15 @@ FFI_ASSERT(0); } } +#ifdef _WIN64 + else if (z > 8) + { + /* On Win64, if a single argument takes more than 8 bytes, + then it is always passed by reference. 
*/ + *(void **)argp = *p_argv; + z = 8; + } +#endif else { memcpy(argp, *p_argv, z); @@ -124,7 +133,6 @@ switch (cif->rtype->type) { case FFI_TYPE_VOID: - case FFI_TYPE_STRUCT: case FFI_TYPE_SINT64: case FFI_TYPE_FLOAT: case FFI_TYPE_DOUBLE: @@ -132,6 +140,18 @@ cif->flags = (unsigned) cif->rtype->type; break; + case FFI_TYPE_STRUCT: + /* MSVC returns small structures in registers. Put in cif->flags + the value FFI_TYPE_STRUCT only if the structure is big enough; + otherwise, put the 4- or 8-bytes integer type. */ + if (cif->rtype->size <= 4) + cif->flags = FFI_TYPE_INT; + else if (cif->rtype->size <= 8) + cif->flags = FFI_TYPE_SINT64; + else + cif->flags = FFI_TYPE_STRUCT; + break; + case FFI_TYPE_UINT64: #ifdef _WIN64 case FFI_TYPE_POINTER: @@ -180,7 +200,7 @@ /* value address then we need to make one */ if ((rvalue == NULL) && - (cif->rtype->type == FFI_TYPE_STRUCT)) + (cif->flags == FFI_TYPE_STRUCT)) { /*@-sysunrecog@*/ ecif.rvalue = alloca(cif->rtype->size); @@ -338,7 +358,7 @@ argp = stack; - if ( cif->rtype->type == FFI_TYPE_STRUCT ) { + if ( cif->flags == FFI_TYPE_STRUCT ) { *rvalue = *(void **) argp; argp += 4; } diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -116,9 +116,9 @@ #if !defined M68K && !defined __x86_64__ && !defined S390 /* Make space for the return structure pointer */ if (cif->rtype->type == FFI_TYPE_STRUCT - /* MSVC returns small structures in registers. But we have a different - workaround: pretend int32 or int64 return type, and converting to - structure afterwards. */ +#ifdef _WIN32 + && (cif->rtype->size > 8) /* MSVC returns small structs in registers */ +#endif #ifdef SPARC && (cif->abi != FFI_V9 || cif->rtype->size > 32) #endif diff --git a/c/libffi_msvc/win64.obj b/c/libffi_msvc/win64.obj new file mode 100644 index 0000000000000000000000000000000000000000..38d3cd166b0ecad62ea4d9aab86cc574de0c9fe7 GIT binary patch [cut] diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,15 +63,15 @@ libraries[:] = [] _filenames = [filename.lower() for filename in os.listdir(COMPILE_LIBFFI)] _filenames = [filename for filename in _filenames - if filename.endswith('.c') or - filename.endswith('.asm')] - if sys.maxsize <= 2**32: - _filenames.remove('win64.asm') - else: + if filename.endswith('.c')] + if sys.maxsize > 2**32: + # 64-bit: unlist win32.c, and add instead win64.obj. If the obj + # happens to get outdated at some point in the future, you need to + # rebuild it manually from win64.asm. 
_filenames.remove('win32.c') + extra_link_args.append(os.path.join(COMPILE_LIBFFI, 'win64.obj')) sources.extend(os.path.join(COMPILE_LIBFFI, filename) for filename in _filenames) - define_macros.append(('USE_C_LIBFFI_MSVC', '1')) else: use_pkg_config() From noreply at buildbot.pypy.org Thu Jul 18 21:48:31 2013 From: noreply at buildbot.pypy.org (matti) Date: Thu, 18 Jul 2013 21:48:31 +0200 (CEST) Subject: [pypy-commit] cffi windows: two tests fixed, two still crashing (test_callback_returning*struct) Message-ID: <20130718194831.D0AF11C02BA@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1287:a7b90c4eab76 Date: 2013-07-18 22:48 +0300 http://bitbucket.org/cffi/cffi/changeset/a7b90c4eab76/ Log: two tests fixed, two still crashing (test_callback_returning*struct) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -977,9 +977,6 @@ py.test.raises(TypeError, f, x[0]) def test_call_function_21(): - import os - if os.name == 'nt': - py.test.skip('crashes the interpreter') BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") complete_struct_or_union(BStruct, [('a', BInt, -1), @@ -999,9 +996,6 @@ assert res == sum(lst) def test_call_function_22(): - import os - if os.name == 'nt': - py.test.skip('crashes the interpreter') BInt = new_primitive_type("int") BArray10 = new_array_type(new_pointer_type(BInt), 10) BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Thu Jul 18 21:59:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 21:59:07 +0200 (CEST) Subject: [pypy-commit] cffi default: Add a test Message-ID: <20130718195907.5F0FC1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1288:2a9609999f81 Date: 2013-07-18 21:58 +0200 http://bitbucket.org/cffi/cffi/changeset/2a9609999f81/ Log: Add a test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1230,6 +1230,23 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") From noreply at buildbot.pypy.org Thu Jul 18 22:07:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 22:07:07 +0200 (CEST) Subject: [pypy-commit] cffi default: Add three tests about callbacks receiving inlined structure arguments Message-ID: <20130718200707.A38D61C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1289:b3729934bf48 Date: 2013-07-18 22:06 +0200 http://bitbucket.org/cffi/cffi/changeset/b3729934bf48/ Log: Add three tests about callbacks receiving inlined structure arguments diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1230,6 +1230,21 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + 
complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + def test_callback_returning_tiny_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1247,6 +1262,22 @@ assert s.a == -10 assert s.b == -30 +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1266,6 +1297,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Thu Jul 18 22:11:08 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 22:11:08 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: implement rewriting for list_resize_ge Message-ID: <20130718201108.63A321C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65468:4da4ff886a5e Date: 2013-07-18 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/4da4ff886a5e/ Log: implement rewriting for list_resize_ge diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,7 +1,7 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import llmemory from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr +from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr, BoxInt from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo @@ -128,7 +128,22 @@ """ what we want to do is to check the length and than add a conditional call to really resize """ - xxx + extra_info = op.getdescr().get_extra_info() + itemsdescr = extra_info.extra_descrs[0] + arraydescr = extra_info.extra_descrs[1] + func = op.getarg(0) + lst = op.getarg(1) + newsizebox = op.getarg(2) + arrbox = BoxPtr() + arrlenbox = BoxInt() + cond_box = BoxInt() + op0 = ResOperation(rop.GETFIELD_GC, [lst], arrbox, descr=itemsdescr) + op1 = ResOperation(rop.ARRAYLEN_GC, [arrbox], 
arrlenbox, + descr=arraydescr) + op2 = ResOperation(rop.INT_LT, [arrlenbox, newsizebox], cond_box) + op3 = ResOperation(rop.COND_CALL, [cond_box, func, lst, newsizebox], + None, descr=op.getdescr()) + self.newops += [op0, op1, op2, op3] def handle_new_array(self, arraydescr, op, kind=FLAG_ARRAY): v_length = op.getarg(0) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1,9 +1,10 @@ from rpython.jit.backend.llsupport.descr import get_size_descr,\ get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescrWithVTable, get_interiorfield_descr + SizeDescrWithVTable, get_interiorfield_descr, get_call_descr from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ GcLLDescr_framework from rpython.jit.backend.llsupport import jitframe +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.gc import get_description from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists @@ -87,6 +88,21 @@ casmdescr.compiled_loop_token = clt tzdescr = None # noone cares # + + ARRAY = lltype.GcArray(lltype.Signed) + LIST = lltype.GcStruct('LIST', ('length', lltype.Signed), + ('items', lltype.Ptr(ARRAY))) + itemsdescr = get_field_descr(self.gc_ll_descr, LIST, 'items') + arraydescr = get_array_descr(self.gc_ll_descr, ARRAY) + extrainfo = EffectInfo(None, None, None, None, + extraeffect=EffectInfo.EF_RANDOM_EFFECTS, + oopspecindex=EffectInfo.OS_LIST_RESIZE_GE, + extra_descrs=[itemsdescr, arraydescr]) + list_resize_descr = get_call_descr(self.gc_ll_descr, + [lltype.Ptr(LIST), lltype.Signed], + lltype.Void, extrainfo) + list_resize_ge = lltype.nullptr(ARRAY) # does not matter, not used + namespace.update(locals()) # for funcname in self.gc_ll_descr._generated_functions: @@ -775,4 +791,13 @@ """) def test_rewrite_list_resize_ge(self): - pass + self.check_rewrite(""" + [p0, i0] + call(ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) + """, """ + [p0, i0] + p1 = getfield_gc(p0, descr=itemsdescr) + i1 = arraylen_gc(p1, descr=arraydescr) + i2 = int_lt(i1, i0) + cond_call(i2, ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) + """) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1621,10 +1621,10 @@ def do_resizable_list__resize_ge(self, op, args, *descrs): index = EffectInfo.OS_LIST_RESIZE_GE LIST = args[0].concretetype.TO - lengthdescr = self.cpu.fielddescrof(LIST, 'length') + itemsdescr = self.cpu.fielddescrof(LIST, 'items') arraydescr = self.cpu.arraydescrof(LIST.items.TO) return self.handle_residual_call(op, oopspecindex=index, - extradescrs=[lengthdescr, arraydescr]) + extradescrs=[itemsdescr, arraydescr]) # ---------- # Strings and Unicodes. 
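For readers following the changeset above: here is a rough pure-Python picture of the fast/slow-path split that the emitted GETFIELD_GC / ARRAYLEN_GC / INT_LT / COND_CALL sequence is meant to express. It is only an illustration; really_resize_ge and the attribute names are stand-ins, the real code manipulates resoperations and descrs rather than Python objects, and the follow-up changesets below adjust the sequence further (the comparison direction is debated and an unconditional store of the new length is added).

    # Sketch of the intended semantics of the rewritten call, not the real code.
    def resize_ge_fast_path(lst, newsize, really_resize_ge):
        items = lst.items                   # GETFIELD_GC(p0, descr=itemsdescr)
        allocated = len(items)              # ARRAYLEN_GC(p1, descr=arraydescr)
        need_grow = allocated < newsize     # INT_LT(i1, i0)
        if need_grow:                       # COND_CALL: slow path taken only when needed
            really_resize_ge(lst, newsize)  # the original residual call
        # a later changeset in this series also sets lst.length = newsize here
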
From noreply at buildbot.pypy.org Thu Jul 18 22:15:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 18 Jul 2013 22:15:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Import cffi/b3729934bf48 Message-ID: <20130718201513.B224C1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65469:d2b06cce2209 Date: 2013-07-18 22:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d2b06cce2209/ Log: Import cffi/b3729934bf48 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Thu Jul 18 22:17:24 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 18 Jul 2013 22:17:24 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: I think it's int_ge since the cond_call is executed *when* the condition is true Message-ID: <20130718201724.418CC1C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: 
fast-slowpath Changeset: r65470:06fe59099c92 Date: 2013-07-18 22:16 +0200 http://bitbucket.org/pypy/pypy/changeset/06fe59099c92/ Log: I think it's int_ge since the cond_call is executed *when* the condition is true diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -79,11 +79,13 @@ if op.getopnum() == rop.CALL_ASSEMBLER: self.handle_call_assembler(op) continue - if op.getopnum() == rop.CALL: - idx = op.getdescr().get_extra_info().oopspecindex - if idx == EffectInfo.OS_LIST_RESIZE_GE: - self.handle_list_resize_ge(op) - continue + if op.getopnum() == rop.CALL and op.getdescr(): + ei = op.getdescr().get_extra_info() + if ei: + idx = ei.oopspecindex + if idx == EffectInfo.OS_LIST_RESIZE_GE: + self.handle_list_resize_ge(op) + continue # self.newops.append(op) return self.newops @@ -140,7 +142,7 @@ op0 = ResOperation(rop.GETFIELD_GC, [lst], arrbox, descr=itemsdescr) op1 = ResOperation(rop.ARRAYLEN_GC, [arrbox], arrlenbox, descr=arraydescr) - op2 = ResOperation(rop.INT_LT, [arrlenbox, newsizebox], cond_box) + op2 = ResOperation(rop.INT_GE, [arrlenbox, newsizebox], cond_box) op3 = ResOperation(rop.COND_CALL, [cond_box, func, lst, newsizebox], None, descr=op.getdescr()) self.newops += [op0, op1, op2, op3] diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -798,6 +798,6 @@ [p0, i0] p1 = getfield_gc(p0, descr=itemsdescr) i1 = arraylen_gc(p1, descr=arraydescr) - i2 = int_lt(i1, i0) + i2 = int_ge(i1, i0) cond_call(i2, ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) """) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -802,7 +802,7 @@ assert op.result is None args = op.getarglist() assert 2 <= len(args) <= 4 + 2 - loc_call = self.make_sure_var_in_reg(args[1], args, selected_reg=eax) + loc_call = self.make_sure_var_in_reg(args[1], [], selected_reg=eax) args_so_far = [args[1]] for i in range(2, len(args)): reg = self.rm.register_arguments[i - 2] From noreply at buildbot.pypy.org Fri Jul 19 00:03:11 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 2013 00:03:11 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: This seems to work Message-ID: <20130718220311.9F9791C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65471:8d11a494f1a9 Date: 2013-07-19 00:02 +0200 http://bitbucket.org/pypy/pypy/changeset/8d11a494f1a9/ Log: This seems to work diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,27 +126,6 @@ else: self.gen_malloc_fixedsize(size, descr.tid, op.result) - def handle_list_resize_ge(self, op): - """ what we want to do is to check the length and than add a conditional - call to really resize - """ - extra_info = op.getdescr().get_extra_info() - itemsdescr = extra_info.extra_descrs[0] - arraydescr = extra_info.extra_descrs[1] - func = op.getarg(0) - lst = op.getarg(1) - newsizebox = op.getarg(2) - arrbox = BoxPtr() - arrlenbox = BoxInt() - cond_box = BoxInt() - op0 = ResOperation(rop.GETFIELD_GC, [lst], arrbox, descr=itemsdescr) - op1 = 
ResOperation(rop.ARRAYLEN_GC, [arrbox], arrlenbox, - descr=arraydescr) - op2 = ResOperation(rop.INT_GE, [arrlenbox, newsizebox], cond_box) - op3 = ResOperation(rop.COND_CALL, [cond_box, func, lst, newsizebox], - None, descr=op.getdescr()) - self.newops += [op0, op1, op2, op3] - def handle_new_array(self, arraydescr, op, kind=FLAG_ARRAY): v_length = op.getarg(0) total_size = -1 @@ -245,6 +224,30 @@ self.newops.append(ResOperation(rop.CALL_ASSEMBLER, args, op.result, op.getdescr())) + def handle_list_resize_ge(self, op): + """ what we want to do is to check the length and than add a conditional + call to really resize + """ + extra_info = op.getdescr().get_extra_info() + lendescr = extra_info.extra_descrs[0] + itemsdescr = extra_info.extra_descrs[1] + arraydescr = extra_info.extra_descrs[2] + func = op.getarg(0) + lst = op.getarg(1) + newsizebox = op.getarg(2) + arrbox = BoxPtr() + arrlenbox = BoxInt() + cond_box = BoxInt() + op0 = ResOperation(rop.GETFIELD_GC, [lst], arrbox, descr=itemsdescr) + op1 = ResOperation(rop.ARRAYLEN_GC, [arrbox], arrlenbox, + descr=arraydescr) + op2 = ResOperation(rop.INT_LT, [arrlenbox, newsizebox], cond_box) + op3 = ResOperation(rop.COND_CALL, [cond_box, func, lst, newsizebox], + None, descr=op.getdescr()) + op4 = ResOperation(rop.SETFIELD_GC, [lst, newsizebox], None, + descr=lendescr) + self.newops += [op0, op1, op2, op3, op4] + # ---------- def emitting_an_operation_that_can_collect(self): diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -92,12 +92,13 @@ ARRAY = lltype.GcArray(lltype.Signed) LIST = lltype.GcStruct('LIST', ('length', lltype.Signed), ('items', lltype.Ptr(ARRAY))) + lendescr = get_field_descr(self.gc_ll_descr, LIST, 'length') itemsdescr = get_field_descr(self.gc_ll_descr, LIST, 'items') arraydescr = get_array_descr(self.gc_ll_descr, ARRAY) extrainfo = EffectInfo(None, None, None, None, extraeffect=EffectInfo.EF_RANDOM_EFFECTS, oopspecindex=EffectInfo.OS_LIST_RESIZE_GE, - extra_descrs=[itemsdescr, arraydescr]) + extra_descrs=[lendescr, itemsdescr, arraydescr]) list_resize_descr = get_call_descr(self.gc_ll_descr, [lltype.Ptr(LIST), lltype.Signed], lltype.Void, extrainfo) @@ -798,6 +799,7 @@ [p0, i0] p1 = getfield_gc(p0, descr=itemsdescr) i1 = arraylen_gc(p1, descr=arraydescr) - i2 = int_ge(i1, i0) + i2 = int_lt(i1, i0) cond_call(i2, ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) + setfield_gc(p0, i0, descr=lendescr) """) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -272,8 +272,10 @@ funcobj = op.args[0].value._obj if funcobj.random_effects_on_gcobjs: return True - except (AttributeError, lltype.DelayedPointer): + except lltype.DelayedPointer: return True # better safe than sorry + except AttributeError: + return False return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1622,9 +1622,11 @@ index = EffectInfo.OS_LIST_RESIZE_GE LIST = args[0].concretetype.TO itemsdescr = self.cpu.fielddescrof(LIST, 'items') + lendescr = self.cpu.fielddescrof(LIST, 'length') arraydescr = self.cpu.arraydescrof(LIST.items.TO) return 
self.handle_residual_call(op, oopspecindex=index, - extradescrs=[itemsdescr, arraydescr]) + extradescrs=[lendescr, itemsdescr, + arraydescr]) # ---------- # Strings and Unicodes. From noreply at buildbot.pypy.org Fri Jul 19 00:48:43 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 19 Jul 2013 00:48:43 +0200 (CEST) Subject: [pypy-commit] pypy py3k: our marshal's fine, this is also a ValueError on cpython Message-ID: <20130718224843.9E1C11C02BA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65472:be81f420e034 Date: 2013-07-18 15:40 -0700 http://bitbucket.org/pypy/pypy/changeset/be81f420e034/ Log: our marshal's fine, this is also a ValueError on cpython diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py --- a/pypy/module/marshal/test/test_marshal.py +++ b/pypy/module/marshal/test/test_marshal.py @@ -184,7 +184,7 @@ import marshal # Yes, there is code that depends on this :-( raises(EOFError, marshal.loads, b'') - raises(MemoryError, marshal.loads, b'(test)') + raises(ValueError, marshal.loads, b'(test)') class AppTestSmallLong(AppTestMarshal): From noreply at buildbot.pypy.org Fri Jul 19 00:48:44 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 19 Jul 2013 00:48:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k: updates Message-ID: <20130718224844.E6EDB1C02BA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65473:a4f20ef6c665 Date: 2013-07-18 15:43 -0700 http://bitbucket.org/pypy/pypy/changeset/a4f20ef6c665/ Log: updates diff --git a/pypy/TODO b/pypy/TODO --- a/pypy/TODO +++ b/pypy/TODO @@ -1,28 +1,8 @@ TODO for the python3 test suite: -* test_float - nan = float('nan'); assert nan in [nan] - This has always been true in CPython, it is now guaranteed that the - containers use the "is" operator as an optimization. - Difficult in pypy because optimized containers are arrays of - unwrapped doubles. A possible solution is to special-case nan in - FloatListStrategy.unwrap(). - * test_memoryview Needs bytes/str changes. Probably easy. Work for this has begun on - py3k-memoryview (by mjacob) - -* test_pep263 - Tracebacks should be able to print unicode source code. This is - really due to the tokenizer not being fully unicode aware. The - parser can somewhat hack around this but maybe not completely - -* test_sys -* test_threading: - Missing sys.getswitchinterval(). https://bugs.pypy.org/issue1470 - We would be interesting to implement the new thread switching - logic, it's a lot of work though. - + py3k-memoryview (by mjacob) https://bugs.pypy.org/issue1542 own-tests: @@ -34,30 +14,22 @@ structseq now subclasses tuple on py3, which breaks how BaseCpyTypeDescr.realize allocates it -* module.marshal.test.test_marshal - Handling of exceptions w/ bad data? Or is the test wrong? 
- -* objspace.std.test.test_floatobject test_from_string - The unicode-based number parsing routines don't raise UnicodeErrors, - but more importantly they raise unprintable exceptions - antocuni's older TODO: -run coverage against the parser/astbuilder/astcompiler: it's probably full of +* run coverage against the parser/astbuilder/astcompiler: it's probably full of dead code because the grammar changed -re-enable IntDictStrategy +* re-enable strategies https://bugs.pypy.org/issue1540 : + - re-enable IntDictStrategy + - re-enable StdObjSpace.listview_str + - re-enable the kwargs dict strategy in dictmultiobject.py + - re-enable view_as_kwargs -re-enable StdObjSpace.listview_str +* unskip numpypy tests in module/test_lib_pypy/numpypy/ -re-enable the kwargs dict strategy in dictmultiobject.py -re-enable view_as_kwargs - -unskip numpypy tests in module/test_lib_pypy/numpypy/ - -optimize W_UnicodeObject, right now it stores both an unicode and an utf8 +* optimize W_UnicodeObject, right now it stores both an unicode and an utf8 version of the same string -re-enable BUILD_LIST_FROM_ARG: see the comment in astcompiler/codegen.py in +* re-enable BUILD_LIST_FROM_ARG: see the comment in astcompiler/codegen.py in ast.ListComp.build_container From noreply at buildbot.pypy.org Fri Jul 19 00:48:46 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 19 Jul 2013 00:48:46 +0200 (CEST) Subject: [pypy-commit] pypy py3k: consider the new gil as an impl detail for now Message-ID: <20130718224846.244C71C02BA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65474:e40e1af13fd0 Date: 2013-07-18 15:44 -0700 http://bitbucket.org/pypy/pypy/changeset/e40e1af13fd0/ Log: consider the new gil as an impl detail for now diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -190,6 +190,8 @@ sys.setcheckinterval(n) self.assertEqual(sys.getcheckinterval(), n) + @unittest.skipUnless(hasattr(sys, 'setswitchinterval'), + 'The new GIL is an implementation detail') @unittest.skipUnless(threading, 'Threading required for this test.') def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) diff --git a/lib-python/3/test/test_threading.py b/lib-python/3/test/test_threading.py --- a/lib-python/3/test/test_threading.py +++ b/lib-python/3/test/test_threading.py @@ -346,10 +346,15 @@ # Try hard to trigger #1703448: a thread is still returned in # threading.enumerate() after it has been join()ed. 
enum = threading.enumerate - old_interval = sys.getswitchinterval() + newgil = hasattr(sys, 'getswitchinterval') + if newgil: + geti, seti = sys.getswitchinterval, sys.setswitchinterval + else: + geti, seti = sys.getcheckinterval, sys.setcheckinterval + old_interval = geti() try: for i in range(1, 100): - sys.setswitchinterval(i * 0.0002) + seti(i * 0.0002 if newgil else i // 5) t = threading.Thread(target=lambda: None) t.start() t.join() @@ -357,7 +362,7 @@ self.assertNotIn(t, l, "#1703448 triggered after %d trials: %s" % (i, l)) finally: - sys.setswitchinterval(old_interval) + seti(old_interval) @test.support.cpython_only def test_no_refcycle_through_target(self): From noreply at buildbot.pypy.org Fri Jul 19 01:31:10 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 19 Jul 2013 01:31:10 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt to py3's __hash/eq/cmp__ changes Message-ID: <20130718233110.DE0191C13F9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65475:b8ebf0cf310f Date: 2013-07-18 16:30 -0700 http://bitbucket.org/pypy/pypy/changeset/b8ebf0cf310f/ Log: adapt to py3's __hash/eq/cmp__ changes diff --git a/pypy/objspace/std/test/test_identityset.py b/pypy/objspace/std/test/test_identityset.py --- a/pypy/objspace/std/test/test_identityset.py +++ b/pypy/objspace/std/test/test_identityset.py @@ -23,10 +23,7 @@ class CustomEq(object): def __eq__(self, other): return True - - class CustomCmp (object): - def __cmp__(self, other): - return 0 + __hash__ = object.__hash__ class CustomHash(object): def __hash__(self): @@ -40,7 +37,7 @@ assert self.uses_strategy('IdentitySetStrategy',s) - for cls in [CustomEq,CustomCmp,CustomHash]: + for cls in [CustomEq,CustomHash]: s = set() s.add(cls()) assert not self.uses_strategy('IdentitySetStrategy',s) @@ -64,6 +61,7 @@ class NotIdent(object): def __eq__(self,other): pass + __hash__ = object.__hash__ s = set([X(),X()]) s.add('foo') From noreply at buildbot.pypy.org Fri Jul 19 07:34:50 2013 From: noreply at buildbot.pypy.org (matti) Date: Fri, 19 Jul 2013 07:34:50 +0200 (CEST) Subject: [pypy-commit] cffi windows: merge default into branch Message-ID: <20130719053450.E19C21C14B6@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1290:a6aff480fc4d Date: 2013-07-19 08:16 +0300 http://bitbucket.org/cffi/cffi/changeset/a6aff480fc4d/ Log: merge default into branch diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1230,6 +1230,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def 
test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): import os if os.name == 'nt': @@ -1252,6 +1300,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): import os if os.name == 'nt': From noreply at buildbot.pypy.org Fri Jul 19 07:34:51 2013 From: noreply at buildbot.pypy.org (matti) Date: Fri, 19 Jul 2013 07:34:51 +0200 (CEST) Subject: [pypy-commit] cffi windows: fix Message-ID: <20130719053451.F2FC51C14B6@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1291:05896a3af290 Date: 2013-07-19 08:34 +0300 http://bitbucket.org/cffi/cffi/changeset/05896a3af290/ Log: fix diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -534,13 +534,13 @@ for c_type, expected_size in [ ('char', 1), ('unsigned int', 4), - ('char *', SIZE_OF_LONG), + ('char *', SIZE_OF_PTR), ('int[5]', 20), ('struct foo', 12), ('union foo', 4), ]: size = ffi.sizeof(c_type) - assert size == expected_size + assert size == expected_size,c_type def test_sizeof_cdata(self): ffi = FFI(backend=self.Backend()) From noreply at buildbot.pypy.org Fri Jul 19 07:57:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 07:57:37 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: add stm_dbg_get_hdr_str() that prints the flags and tid of an object Message-ID: <20130719055737.2F2F91C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r414:560fad6b4f7f Date: 2013-07-19 07:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/560fad6b4f7f/ Log: add stm_dbg_get_hdr_str() that prints the flags and tid of an object diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -516,7 +516,7 @@ t = (nodeptr)read_barrier(ptrs[i]); w = t->weakref; if(w) { - ww = stm_read_barrier(w); + ww = (weaknodeptr)stm_read_barrier((gcptr)w); assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF); if (ww->node) { check((gcptr)ww->node); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -6,6 +6,28 @@ */ #include "stmimpl.h" +#ifdef _GC_DEBUG +char tmp_buf[128]; +char* stm_dbg_get_hdr_str(gcptr obj) +{ + char *cur; + char *flags[] = GC_FLAG_NAMES; + int i; + + i = 0; + cur = tmp_buf; + while (flags[i]) { + if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { + cur += sprintf(cur, "%s|", flags[i]); + } + i++; + } + cur += sprintf(cur, "tid=%ld\n", 
stm_get_tid(obj)); + return tmp_buf; +} +#endif + + __thread struct tx_descriptor *thread_descriptor = NULL; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -200,4 +200,7 @@ void DescriptorInit(void); void DescriptorDone(void); +#ifdef _GC_DEBUG +char* stm_dbg_get_hdr_str(gcptr obj); +#endif #endif /* _ET_H */ From noreply at buildbot.pypy.org Fri Jul 19 10:21:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 10:21:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Avoid doing changes in this debug-only function Message-ID: <20130719082113.74D9F1C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r415:7e7b074f25af Date: 2013-07-19 10:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/7e7b074f25af/ Log: Avoid doing changes in this debug-only function diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -545,10 +545,15 @@ d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= d->num_private_from_protected_known_old); +#if 0 + /* we could here force the following, but there is little point + and it's a bad idea to do things in this function that is + compiled only in debug mode */ d->num_read_objects_known_old = gcptrlist_size(&d->list_of_read_objects); d->num_private_from_protected_known_old = gcptrlist_size(&d->private_from_protected); +#endif return 0; } else { From noreply at buildbot.pypy.org Fri Jul 19 10:21:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 10:21:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: An extra test, with explanation Message-ID: <20130719082114.90D5A1C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r416:6ce90865cdee Date: 2013-07-19 10:21 +0200 http://bitbucket.org/pypy/stmgc/changeset/6ce90865cdee/ Log: An extra test, with explanation diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -200,6 +200,41 @@ check_not_free(p2) assert classify(p2) == "private" +def test_old_private_from_protected_to_young_private_2(): + p0 = nalloc_refs(1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + lib.setptr(p0, 0, ffi.NULL) + assert classify(p0) == "private_from_protected" + assert lib.in_nursery(p0) # a young private_from_protected + # + lib.stm_push_root(p0) + minor_collect() + p0 = lib.stm_pop_root() + assert classify(p0) == "private_from_protected" + assert not lib.in_nursery(p0) # becomes an old private_from_protected + # + # Because it's a private_from_protected, its h_revision is a pointer + # to the backup copy, and not stm_private_rev_num. It means that the + # write barrier will always enter its slow path, even though the + # GCFLAG_WRITE_BARRIER is not set. 
+ assert p0.h_revision != lib.get_private_rev_num() + assert not (p0.h_tid & GCFLAG_WRITE_BARRIER) + # + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) # should trigger the write barrier again + assert classify(p0) == "private_from_protected" + lib.stm_push_root(p0) + minor_collect() + p0b = lib.stm_pop_root() + assert p0b == p0 + check_nursery_free(p1) + assert classify(p0) == "private_from_protected" + p2 = lib.getptr(p0, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + def test_new_version(): p1 = oalloc(HDR) assert lib.stm_write_barrier(p1) == p1 From noreply at buildbot.pypy.org Fri Jul 19 10:33:05 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 19 Jul 2013 10:33:05 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: mention the hosts Message-ID: <20130719083305.3838B1C303C@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4996:761ec0103b43 Date: 2013-07-18 19:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/761ec0103b43/ Log: mention the hosts diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -43,10 +43,13 @@ ------------ The sprint will happen within a room of `King's College's`_ `Strand -Campus`_ in Central London, UK. +Campus`_ in Central London, UK. We are being hosted by `Laurie Tratt`_ and the +`Software Development Team`_. .. _`King's College`: http://www.kcl.ac.uk/ .. _`Strand Campus`: http://goo.gl/maps/Qz0zz +.. _`Laurie Tratt`: http://tratt.net/laurie +.. _`Software Development Team`: http://soft-dev.org ------------ Demo Morning From noreply at buildbot.pypy.org Fri Jul 19 11:20:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 11:20:41 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add comments about the barrier placement Message-ID: <20130719092041.570C11C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r417:6bde91a8f3f9 Date: 2013-07-19 11:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/6bde91a8f3f9/ Log: Add comments about the barrier placement diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -54,11 +54,19 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now) */ -#if 0 // (optimized version below) -gcptr stm_read_barrier(gcptr); -gcptr stm_write_barrier(gcptr); -#endif +/* read/write barriers (the most general versions only for now). + + - the read barrier must be applied before reading from an object. + the result is valid as long as we're in the same transaction, + and stm_write_barrier() is not called on the same object. + + - the write barrier must be applied before writing to an object. + the result is valid for a shorter period of time: we have to + do stm_write_barrier() again if we ended the transaction, or + if we did a potential collection (e.g. stm_allocate()). +*/ +static inline gcptr stm_read_barrier(gcptr); +static inline gcptr stm_write_barrier(gcptr); /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' From noreply at buildbot.pypy.org Fri Jul 19 11:21:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 11:21:30 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Adpat writebarrier.py to the precise current set of requirements. 
Message-ID: <20130719092130.57A541C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65476:21348e84ad9a Date: 2013-07-19 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/21348e84ad9a/ Log: Adpat writebarrier.py to the precise current set of requirements. diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -24,7 +24,7 @@ res = self.interpret(f1, [-5]) assert res == 42 assert len(self.writemode) == 0 - assert self.barriers == ['G2R'] + assert self.barriers == ['P2R'] def test_simple_write(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -37,7 +37,7 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 - assert self.barriers == ['G2W'] + assert self.barriers == ['P2W'] def test_multiple_reads(self): X = lltype.GcStruct('X', ('foo', lltype.Signed), @@ -58,7 +58,7 @@ res = self.interpret(f1, [4]) assert res == -81 assert len(self.writemode) == 0 - assert self.barriers == ['G2R'] + assert self.barriers == ['P2R'] def test_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -82,10 +82,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W', 'o2r'] + assert self.barriers == ['P2R', 'P2W', 'p2r'] res = self.interpret(f1, [x, x]) assert res == 42 - assert self.barriers == ['P2R', 'P2W', 'O2R'] + assert self.barriers == ['P2R', 'P2W', 'P2R'] def test_write_cannot_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -227,18 +227,31 @@ x.foo = 815 x.zbar = 'A' external_stuff() - result = x.foo - if isinstance(x, Y): - result += x.ybar + result = x.foo # 1 + if isinstance(x, Y): # 2 + result += x.ybar # 3 return result res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['p2r', 'p2r'] # from two blocks (could be - # optimized later) + assert self.barriers == ['p2r', 'p2r', 'p2r'] # from 3 blocks (could be + # optimized later) res = self.interpret(f1, [-10]) assert res == 815 - assert self.barriers == ['p2r'] + assert self.barriers == ['p2r', 'p2r'] + + def test_write_barrier_repeated(self): + class X: + pass + x = X() + def f1(i): + x.a = i # write barrier + y = X() # malloc + x.a += 1 # write barrier again + return y + + res = self.interpret(f1, [10]) + assert self.barriers == ['P2W', 'r2w'] external_stuff = rffi.llexternal('external_stuff', [], lltype.Void, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -2,7 +2,7 @@ from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.translator.stm.transform import STMTransformer -from rpython.translator.stm.writebarrier import MORE_PRECISE_CATEGORIES +from rpython.translator.stm.writebarrier import NEEDS_BARRIER from rpython.conftest import option @@ -29,13 +29,13 @@ self.writemode = set() self.barriers = [] - def get_category(self, p): + def get_category_or_null(self, p): if isinstance(p, _stmptr): return p._category if not p: return 'N' if p._solid: - return 'G' # allocated with immortal=True + return 'P' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) def interpret(self, fn, args): @@ -71,19 +71,19 @@ if 
isinstance(value, _stmptr): yield value - def get_category(self, p): - return self.llinterpreter.tester.get_category(p) + def get_category_or_null(self, p): + return self.llinterpreter.tester.get_category_or_null(p) def check_category(self, p, expected): - cat = self.get_category(p) - assert cat in MORE_PRECISE_CATEGORIES[expected] + cat = self.get_category_or_null(p) + assert cat in 'NPRW' return cat def op_stm_barrier(self, kind, obj): frm, middledigit, to = kind assert middledigit == '2' cat = self.check_category(obj, frm) - if cat in MORE_PRECISE_CATEGORIES[to]: + if not NEEDS_BARRIER[cat, to]: # a barrier, but with no effect self.llinterpreter.tester.barriers.append(kind.lower()) return obj @@ -109,10 +109,10 @@ def op_setfield(self, obj, fieldname, fieldvalue): if not obj._TYPE.TO._immutable_field(fieldname): self.check_category(obj, 'W') - # convert R -> O all other pointers to the same object we can find + # convert R -> P all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'O') + _stmptr._category.__set__(p, 'P') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -4,6 +4,7 @@ from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref from rpython.translator.c.support import log +from rpython.memory.gctransform.framework import CollectAnalyzer class STMTransformer(object): @@ -26,9 +27,11 @@ def transform_write_barrier(self): self.write_analyzer = WriteAnalyzer(self.translator) + self.collect_analyzer = CollectAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) del self.write_analyzer + del self.collect_analyzer def transform_turn_inevitable(self): for graph in self.translator.graphs: diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -9,14 +9,14 @@ 'malloc_nonmovable', 'malloc_nonmovable_varsize', ]) -MORE_PRECISE_CATEGORIES = { - 'P': 'PGORLWN', # Pointer: the most general category - 'G': 'GN', # Global: known to be a non-local pointer - 'O': 'ORLWN', # Old: used to be read-ready, but maybe someone wrote - 'R': 'RLWN', # Read-ready: direct reads from there are ok - 'L': 'LWN', # Local: a local pointer - 'W': 'WN', # Write-ready: direct writes here are ok - 'N': 'N'} # NULL (the other categories also all contain NULL) +NEEDS_BARRIER = { + ('P', 'R'): True, + ('P', 'W'): True, + ('R', 'R'): False, + ('R', 'W'): True, + ('W', 'R'): False, + ('W', 'W'): False, + } def unwraplist(list_v): for v in list_v: @@ -44,14 +44,20 @@ def insert_stm_barrier(stmtransformer, graph): + """This function uses the following characters for 'categories': + + * 'P': a general pointer + * 'R': the read barrier was applied + * 'W': the write barrier was applied + """ graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) def get_category(v): - if isinstance(v, Constant): - if v.value: - return 'G' - else: - return 'N' # NULL + return category.get(v, 'P') + + def get_category_or_null(v): + if isinstance(v, Constant) and not v.value: + return 'N' return category.get(v, 'P') def renamings_get(v): @@ -82,7 
+88,7 @@ op.result.concretetype is not lltype.Void and op.args[0].concretetype.TO._gckind == 'gc' and True): #not is_immutable(op)): XXX see [1] - wants_a_barrier.setdefault(op, 'R') + wants_a_barrier[op] = 'R' elif (op.opname in ('setfield', 'setarrayitem', 'setinteriorfield') and op.args[-1].concretetype is not lltype.Void and @@ -113,7 +119,7 @@ v_holder = renamings.setdefault(v, [v]) v = v_holder[0] frm = get_category(v) - if frm not in MORE_PRECISE_CATEGORIES[to]: + if NEEDS_BARRIER[frm, to]: c_info = Constant('%s2%s' % (frm, to), lltype.Void) w = varoftype(v.concretetype) newop = SpaceOperation('stm_barrier', [c_info, v], w) @@ -127,9 +133,9 @@ newoperations.append(newop) # if op in expand_comparison: - cats = ''.join([get_category(v) for v in newop.args]) - if ('N' not in cats and - cats not in ('LL', 'LW', 'WL', 'WW')): + cats = (get_category_or_null(newop.args[0]), + get_category_or_null(newop.args[1])) + if 'N' not in cats and cats != ('W', 'W'): if newop.opname == 'ptr_ne': v = varoftype(lltype.Bool) negop = SpaceOperation('bool_not', [v], @@ -137,18 +143,32 @@ newoperations.append(negop) newop.result = v newop.opname = 'stm_ptr_eq' - # + + if stmtransformer.collect_analyzer.analyze(op): + # this operation can collect: we bring all 'W' + # categories back to 'R', because we would need + # another stm_write_barrier on them afterwards + for v, cat in category.items(): + if cat == 'W': + category[v] = 'R' + effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) if effectinfo: if effectinfo is top_set: - category.clear() + # this operation can perform random writes: any + # 'R'-category object falls back to 'P' because + # we would need another stm_read_barrier() + for v, cat in category.items(): + if cat == 'R': + category[v] = 'P' else: + # the same, but only on objects of the right types types = set([entry[1] for entry in effectinfo]) for v in category.keys(): if v.concretetype in types and category[v] == 'R': - category[v] = 'O' - # + category[v] = 'P' + if op.opname in MALLOCS: category[op.result] = 'W' From noreply at buildbot.pypy.org Fri Jul 19 12:02:43 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 19 Jul 2013 12:02:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fixes proposed by Laurie Message-ID: <20130719100243.74C711C0149@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4997:08ffac1f4bfa Date: 2013-07-19 12:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/08ffac1f4bfa/ Log: fixes proposed by Laurie diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -43,20 +43,23 @@ ------------ The sprint will happen within a room of `King's College's`_ `Strand -Campus`_ in Central London, UK. We are being hosted by `Laurie Tratt`_ and the -`Software Development Team`_. +Campus`_ in `Central London, UK`_. There are some travel instructions `how to +get there`_. We are being hosted by `Laurence Tratt`_ and the `Software +Development Team`_. .. _`King's College`: http://www.kcl.ac.uk/ -.. _`Strand Campus`: http://goo.gl/maps/Qz0zz -.. _`Laurie Tratt`: http://tratt.net/laurie +.. _`Central London, UK`: http://goo.gl/maps/Qz0zz +.. _`Strand Campus`: http://www.kcl.ac.uk/campuslife/campuses/strand/StrandCampusLocation.aspx +.. _`how to get there`: http://www.kcl.ac.uk/campuslife/campuses/directions/strand.aspx +.. _`Laurence Tratt`: http://tratt.net/laurie .. 
_`Software Development Team`: http://soft-dev.org ------------ -Demo Morning +Demo Session ------------ If you don't want to come to the full sprint, but still want to chat a -bit, we are planning to have a demo morning on Tuesday August 27. We +bit, we are planning to have a demo session on Tuesday August 27. We will announce this separately on the blog. If you are interested, please leave a comment. @@ -74,7 +77,9 @@ http://mail.python.org/mailman/listinfo/pypy-dev -Remember that you may need a UK-to-(insert country here) power adapter. -Please also note that UK is not within the Schengen zone; EU citizens -can usually enter without a passport (a photo ID is enough) but it is -best to check. Also, the UK does not have the Euro. +Remember that you may need a (insert country here)-to-UK power adapter. +Please note that UK is not within the Schengen zone, so non-EU and +non-Switzerland citizens may require specific visa. Please check `travel +regulations`_. Also, the UK uses pound sterling (GBP). + +.. _`travel regulations`: http://www.ukba.homeoffice.gov.uk/visas-immigration/do-you-need-a-visa/ From noreply at buildbot.pypy.org Fri Jul 19 12:06:53 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 12:06:53 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: more debug output Message-ID: <20130719100653.ED3AC1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r418:983bb16c726a Date: 2013-07-19 11:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/983bb16c726a/ Log: more debug output diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -16,13 +16,14 @@ i = 0; cur = tmp_buf; + cur += sprintf(cur, "%p:", obj); while (flags[i]) { if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { cur += sprintf(cur, "%s|", flags[i]); } i++; } - cur += sprintf(cur, "tid=%ld\n", stm_get_tid(obj)); + cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); return tmp_buf; } #endif diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -27,10 +27,11 @@ if (obj->h_tid & GCFLAG_IMMUTABLE) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); if (obj->h_tid & GCFLAG_PUBLIC) { - /* young public */ + /* young public, replace with stolen old copy */ assert(obj->h_tid & GCFLAG_NURSERY_MOVED); assert(IS_POINTER(obj->h_revision)); stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ goto done; } @@ -52,10 +53,10 @@ if (!(obj->h_original)) obj->h_original = (revision_t)O; } + obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); obj->h_revision = (revision_t)O; O->h_tid |= GCFLAG_PUBLIC; - obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); /* here it is fine if it stays in read caches because the object is immutable anyway and there are no write_barriers allowed. 
*/ diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -41,9 +41,13 @@ if (is_in_nursery(d, pointing_to)) { if (pointing_to->h_tid & GCFLAG_NURSERY_MOVED) { + dprintf(("weakref ptr moved %p->%p\n", + WEAKREF_PTR(weakref, size), + (gcptr)pointing_to->h_revision)); WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision; } else { + dprintf(("weakref lost ptr %p\n", WEAKREF_PTR(weakref, size))); WEAKREF_PTR(weakref, size) = NULL; continue; /* no need to remember this weakref any longer */ } @@ -143,6 +147,10 @@ assert(pointing_to != NULL); if (is_partially_visited(pointing_to)) { pointing_to = stmgcpage_visit(pointing_to); + dprintf(("mweakref ptr moved %p->%p\n", + WEAKREF_PTR(weakref, size), + pointing_to)); + assert(pointing_to->h_tid & GCFLAG_VISITED); WEAKREF_PTR(weakref, size) = pointing_to; } @@ -169,6 +177,7 @@ if (pointing_to->h_tid & GCFLAG_VISITED) { continue; /* the target stays alive, the weakref remains */ } + dprintf(("mweakref lost ptr %p\n", WEAKREF_PTR(weakref, size))); WEAKREF_PTR(weakref, size) = NULL; /* the target dies */ } /* remove this weakref from the list */ From noreply at buildbot.pypy.org Fri Jul 19 12:06:55 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 12:06:55 +0200 (CEST) Subject: [pypy-commit] stmgc default: Backed out changeset: 191c168da60e (readding of objects during minor collections to old_objects_to_trace) Message-ID: <20130719100655.06DC11C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r419:ff208391e85c Date: 2013-07-19 11:51 +0200 http://bitbucket.org/pypy/stmgc/changeset/ff208391e85c/ Log: Backed out changeset: 191c168da60e (readding of objects during minor collections to old_objects_to_trace) diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -125,9 +125,6 @@ } /************************************************************/ -/* list for private/protected, old roots that need to be - kept in old_objects_to_trace */ -static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL}; static inline gcptr create_old_object_copy(gcptr obj) { @@ -207,22 +204,6 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); - item = *end; - /* if private or protected, this object needs to be - traced again in the next minor_collect if it is - currently in old_objects_to_trace. 
Because then - it may be seen as write-ready in the view of - someone: - pw = write_barrier(); push_root(pw); - minor_collect(); pw = pop_root(); // pw still write-ready - */ - if (item - && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in - obj_to_trace*/ - && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED - || item->h_revision == stm_private_rev_num)) { - gcptrlist_insert(&private_or_protected_roots, item); - } } else if (item != NULL) { if (item == END_MARKER_OFF) @@ -545,15 +526,10 @@ d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= d->num_private_from_protected_known_old); -#if 0 - /* we could here force the following, but there is little point - and it's a bad idea to do things in this function that is - compiled only in debug mode */ d->num_read_objects_known_old = gcptrlist_size(&d->list_of_read_objects); d->num_private_from_protected_known_old = gcptrlist_size(&d->private_from_protected); -#endif return 0; } else { diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -205,6 +205,7 @@ assert list_of_read_objects() == [p2] def test_write_barrier_after_minor_collect(): + # should fail p = oalloc_refs(1) pw = lib.stm_write_barrier(p) @@ -220,8 +221,10 @@ assert pw.h_tid & GCFLAG_OLD rawsetptr(pw, 0, r) - # pw needs to be readded to old_objects_to_trace - # before the next minor gc in order for this test to pass + # pw not in old_objects_to_trace. A + # repeated write_barrier before + # rawsetptr() would fix that + lib.stm_push_root(r) minor_collect() minor_collect() @@ -232,24 +235,13 @@ pr = lib.stm_read_barrier(p) assert r != r2 + # these will fail because pw/pr was + # not traced in the last minor_collect, + # because they were not registered in + # old_objects_to_trace. assert getptr(pr, 0) != r assert getptr(pr, 0) == r2 - # the following shouldn't be done - # because pw was not saved. 
Just - # here to check that pw gets removed - # from old_objects_to_trace when not found - # on the root stack anymore - rawsetptr(pw, 0, q) - lib.stm_push_root(q) - minor_collect() - q2 = lib.stm_pop_root() - check_nursery_free(q) - pr = lib.stm_read_barrier(p) - assert q != q2 - assert getptr(pr, 0) == q - assert getptr(pr, 0) != q2 - def test_write_barrier_after_minor_collect_young_to_old(): p = nalloc_refs(1) pw = lib.stm_write_barrier(p) diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_write1(ob); + _du_read1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,6 +85,8 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); + _du_write1(ob); + for (i=0; iob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; From noreply at buildbot.pypy.org Fri Jul 19 12:06:56 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 12:06:56 +0200 (CEST) Subject: [pypy-commit] stmgc default: more backout Message-ID: <20130719100656.0AE111C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r420:d7b329c4c608 Date: 2013-07-19 11:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/d7b329c4c608/ Log: more backout diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -358,19 +358,6 @@ stmgc_trace(obj, &visit_if_young); } - - while (gcptrlist_size(&private_or_protected_roots) > 0) { - gcptr obj = gcptrlist_pop(&private_or_protected_roots); - /* if it has the write_barrier flag, clear it so that - it doesn't get inserted twice by a later write-barrier */ - if (obj->h_tid & GCFLAG_WRITE_BARRIER) { - /* only insert those that were in old_obj_to_trace - and that we didn't insert already */ - obj->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&d->old_objects_to_trace, obj); - dprintf(("re-add %p to old_objects_to_trace\n", obj)); - } - } } static void fix_list_of_read_objects(struct tx_descriptor *d) From noreply at buildbot.pypy.org Fri Jul 19 12:06:57 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 12:06:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: demo_random should not use the writeables cache anymore, since that is invalid again Message-ID: <20130719100657.0F2811C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r421:5f6c08c9274a Date: 2013-07-19 12:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/5f6c08c9274a/ Log: demo_random should not use the writeables cache anymore, since that is invalid again diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -53,11 +53,6 @@ time_t default_seed; gcptr shared_roots[SHARED_ROOTS]; -#define CACHE_MASK 65535 -#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *)) -#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \ - + ((revision_t)(obj) & CACHE_MASK))) - struct thread_data { unsigned int thread_seed; gcptr roots[MAXROOTS]; @@ -67,7 +62,6 @@ int steps_left; int interruptible; int atomic; - revision_t writeable[CACHE_ENTRIES]; }; __thread struct thread_data td; @@ -137,7 +131,7 @@ return x; } -void push_roots(int with_cache) +void push_roots() { int i; for (i = 0; i < td.num_roots; i++) { @@ -145,30 +139,11 @@ if (td.roots[i]) stm_push_root(td.roots[i]); } - - if (with_cache) { - stm_push_root(NULL); - for (i = 0; i < CACHE_ENTRIES; i++) { - if (td.writeable[i]) - 
stm_push_root((gcptr)td.writeable[i]); - } - } } -void pop_roots(int with_cache) +void pop_roots() { int i; - /* some objects may have changed positions */ - memset(td.writeable, 0, sizeof(td.writeable)); - - if (with_cache) { - gcptr obj = stm_pop_root(); - while (obj) { - CACHE_AT(td.writeable, obj) = obj; - obj = stm_pop_root(); - } - } - for (i = td.num_roots - 1; i >= 0; i--) { if (td.roots[i]) td.roots[i] = stm_pop_root(); @@ -186,9 +161,9 @@ nodeptr allocate_node() { nodeptr r; - push_roots(1); + push_roots(); r = (nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE); - pop_roots(1); + pop_roots(); return r; } @@ -252,7 +227,6 @@ if (p != NULL) { check(p); w = stm_write_barrier(p); - CACHE_AT(td.writeable, w) = w; check(w); assert(is_private(w)); } @@ -369,22 +343,22 @@ { int k = get_rand(100); if (k < 10) { - push_roots(1); + push_roots(); stm_push_root(p); stm_become_inevitable("fun"); p = stm_pop_root(); - pop_roots(1); + pop_roots(); } else if (k < 40) { - push_roots(1); + push_roots(); stmgc_minor_collect(); - pop_roots(1); + pop_roots(); p = NULL; } else if (k < 41 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); - push_roots(1); + push_roots(); stmgcpage_possibly_major_collect(1); - pop_roots(1); + pop_roots(); p = NULL; } return p; @@ -423,10 +397,7 @@ break; case 7: // set 'p' as *next in one of the roots check(_r); - if (CACHE_AT(td.writeable, _r) == _r) - w_r = (nodeptr)_r; - else - w_r = (nodeptr)write_barrier(_r); + w_r = (nodeptr)write_barrier(_r); check((gcptr)w_r); check(p); w_r->next = (struct node*)p; @@ -485,10 +456,7 @@ assert(w_t->id == stm_id((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->id = stm_id((gcptr)w_t); assert(w_t->id == stm_id((gcptr)_t)); } @@ -504,10 +472,7 @@ assert(w_t->hash == stm_hash((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } @@ -563,7 +528,7 @@ void transaction_break() { - push_roots(0); + push_roots(); td.interruptible = 1; copy_roots(td.roots, td.roots_outside_perform, td.num_roots); @@ -575,9 +540,7 @@ copy_roots(td.roots_outside_perform, td.roots, td.num_roots); td.interruptible = 0; - pop_roots(0); - - /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */ + pop_roots(); } @@ -592,8 +555,8 @@ assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); arg1 = stm_pop_root(); assert(arg1 == NULL); - pop_roots(0); - push_roots(0); + pop_roots(); + push_roots(); stm_push_root(arg1); stm_push_root(end_marker); @@ -609,9 +572,6 @@ { gcptr p = NULL; - // clear cache of writeables: - memset(&td.writeable, 0, sizeof(td.writeable)); - while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -206,6 +206,7 @@ def test_write_barrier_after_minor_collect(): # should fail + py.test.skip("should fail now") p = oalloc_refs(1) pw = lib.stm_write_barrier(p) @@ -243,6 +244,7 @@ assert getptr(pr, 0) == r2 def test_write_barrier_after_minor_collect_young_to_old(): + py.test.skip("should fail now") p = nalloc_refs(1) pw = lib.stm_write_barrier(p) From noreply at buildbot.pypy.org Fri Jul 19 12:34:30 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 
2013 12:34:30 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Call directly the resize (avoids double calling). Add a jit target that I'm Message-ID: <20130719103430.2645E1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65477:80cf2e2836ff Date: 2013-07-19 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/80cf2e2836ff/ Log: Call directly the resize (avoids double calling). Add a jit target that I'm testing. diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -229,10 +229,15 @@ call to really resize """ extra_info = op.getdescr().get_extra_info() + if extra_info.extra_descrs is None: + # this is for tests only, actually never happens + self.newops.append(op) + return lendescr = extra_info.extra_descrs[0] itemsdescr = extra_info.extra_descrs[1] arraydescr = extra_info.extra_descrs[2] - func = op.getarg(0) + resize_ptr = ConstInt(extra_info.extra_descrs[3].getint()) + calldescr = extra_info.extra_descrs[4] lst = op.getarg(1) newsizebox = op.getarg(2) arrbox = BoxPtr() @@ -242,8 +247,9 @@ op1 = ResOperation(rop.ARRAYLEN_GC, [arrbox], arrlenbox, descr=arraydescr) op2 = ResOperation(rop.INT_LT, [arrlenbox, newsizebox], cond_box) - op3 = ResOperation(rop.COND_CALL, [cond_box, func, lst, newsizebox], - None, descr=op.getdescr()) + op3 = ResOperation(rop.COND_CALL, [cond_box, resize_ptr, lst, + newsizebox], + None, descr=calldescr) op4 = ResOperation(rop.SETFIELD_GC, [lst, newsizebox], None, descr=lendescr) self.newops += [op0, op1, op2, op3, op4] diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -5,7 +5,7 @@ from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from rpython.jit.codewriter.policy import log from rpython.jit.metainterp import quasiimmut -from rpython.jit.metainterp.history import getkind +from rpython.jit.metainterp.history import getkind, AbstractDescr from rpython.jit.metainterp.typesystem import deref, arrayItem from rpython.jit.metainterp.blackhole import BlackholeInterpreter from rpython.flowspace.model import SpaceOperation, Variable, Constant, c_last_exception @@ -17,6 +17,15 @@ from rpython.translator.simplify import get_funcobj from rpython.translator.unsimplify import varoftype +class IntDescr(AbstractDescr): + """ Disguise int as a descr + """ + def __init__(self, v): + self.v = v + + def getint(self): + return self.v + class UnsupportedMallocFlags(Exception): pass @@ -1624,9 +1633,16 @@ itemsdescr = self.cpu.fielddescrof(LIST, 'items') lendescr = self.cpu.fielddescrof(LIST, 'length') arraydescr = self.cpu.arraydescrof(LIST.items.TO) + oopspec = "list.resize_hint_really" + c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper, + oopspec, [lltype.Ptr(LIST), lltype.Signed], lltype.Void) + op1 = SpaceOperation('direct_call', [c_func] + args, op.result) + calldescr = self.callcontrol.getcalldescr(op1) + extradescrs = [lendescr, itemsdescr, arraydescr, + IntDescr(rffi.cast(lltype.Signed, c_func.value)), + calldescr] return self.handle_residual_call(op, oopspecindex=index, - extradescrs=[lendescr, itemsdescr, - arraydescr]) + extradescrs=extradescrs) # ---------- # Strings and Unicodes. 
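In plain C terms, the operation sequence that rewrite.py now emits for the resize (getfield of 'items', arraylen_gc, int_lt, cond_call, setfield_gc of 'length') behaves like the sketch below. This is an illustration only: the struct layout is a simplification of an RPython list, and really_resize is a stand-in for _ll_list_resize_hint_really.

    struct array { long length; long item0; };           /* capacity + payload */
    struct list  { long length; struct array *items; };

    void really_resize(struct list *l, long newsize);    /* the residual call */

    void list_resize_ge(struct list *l, long newsize)
    {
        /* arraylen_gc + int_lt: is the current capacity already enough? */
        if (l->items->length < newsize)
            really_resize(l, newsize);    /* cond_call: grow 'items' only when
                                             the capacity check fails */
        /* setfield_gc on lendescr: the new length is stored inline in both
           cases, so the common append never leaves the fast path */
        l->length = newsize;
    }

The point of the pattern is that the expensive residual call happens only when the capacity check fails; the unconditional part (storing the new length) stays as straight-line code.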
diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -8,6 +8,7 @@ from rpython.rlib.jit import elidable, oopspec from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask from rpython.rtyper import rlist +from rpython.rtyper.lltypesystem import rlist as rlist_ll from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.llinterp import LLInterpreter @@ -74,7 +75,7 @@ # another block, so the set of alive_v will be different. methname = op.args[0].value assert methname == 'jit_merge_point', ( - "reds='auto' is supported only for jit drivers which " + "reds='auto' is supported only for jit drivers which " "calls only jit_merge_point. Found a call to %s" % methname) # # compute the set of live variables across the jit_marker @@ -212,6 +213,8 @@ _ll_5_list_ll_arraycopy = rgc.ll_arraycopy +def _ll_2_list_resize_hint_really(l, newsize): + rlist_ll._ll_list_resize_hint_really(l, newsize, True) @elidable def _ll_1_gc_identityhash(x): diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py new file mode 100644 --- /dev/null +++ b/rpython/translator/goal/targetjitstandalone.py @@ -0,0 +1,34 @@ + +""" Only the JIT +""" + +from rpython.rlib import jit +from rpython.jit.codewriter.policy import JitPolicy + +driver = jit.JitDriver(greens = [], reds = 'auto') + +def main(count): + i = 0 + l = [] + while i < count: + driver.jit_merge_point() + l.append(i) + i += 1 + return l + +def entry_point(argv): + if len(argv) < 3: + print "Usage: jitstandalone " + count1 = int(argv[1]) + count2 = int(argv[2]) + s = 0 + for i in range(count1): + s += len(main(count2)) + print s + return 0 + +def target(*args): + return entry_point, None + +def jitpolicy(driver): + return JitPolicy() From noreply at buildbot.pypy.org Fri Jul 19 12:42:11 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 2013 12:42:11 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix test_rewrite Message-ID: <20130719104211.2E2871C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65478:afb489f5629a Date: 2013-07-19 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/afb489f5629a/ Log: fix test_rewrite diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -9,7 +9,7 @@ from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.codewriter.heaptracker import register_known_gctype -from rpython.jit.metainterp.history import JitCellToken, FLOAT +from rpython.jit.metainterp.history import JitCellToken, FLOAT, ConstInt from rpython.rtyper.lltypesystem import lltype, rclass, rffi from rpython.jit.backend.x86.arch import WORD @@ -95,14 +95,16 @@ lendescr = get_field_descr(self.gc_ll_descr, LIST, 'length') itemsdescr = get_field_descr(self.gc_ll_descr, LIST, 'items') arraydescr = get_array_descr(self.gc_ll_descr, ARRAY) + resize_ptr = ConstInt(123) extrainfo = EffectInfo(None, None, None, None, extraeffect=EffectInfo.EF_RANDOM_EFFECTS, oopspecindex=EffectInfo.OS_LIST_RESIZE_GE, - extra_descrs=[lendescr, itemsdescr, arraydescr]) + extra_descrs=[lendescr, itemsdescr, arraydescr, + resize_ptr]) 
list_resize_descr = get_call_descr(self.gc_ll_descr, [lltype.Ptr(LIST), lltype.Signed], lltype.Void, extrainfo) - list_resize_ge = lltype.nullptr(ARRAY) # does not matter, not used + extrainfo.extra_descrs.append(list_resize_descr) namespace.update(locals()) # @@ -794,12 +796,12 @@ def test_rewrite_list_resize_ge(self): self.check_rewrite(""" [p0, i0] - call(ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) + call(121, p0, i0, descr=list_resize_descr) """, """ [p0, i0] p1 = getfield_gc(p0, descr=itemsdescr) i1 = arraylen_gc(p1, descr=arraydescr) i2 = int_lt(i1, i0) - cond_call(i2, ConstClass(list_resize_ge), p0, i0, descr=list_resize_descr) + cond_call(i2, 123, p0, i0, descr=list_resize_descr) setfield_gc(p0, i0, descr=lendescr) """) From noreply at buildbot.pypy.org Fri Jul 19 12:44:35 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 12:44:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: add another test that passes, not sure why Message-ID: <20130719104435.271111C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r422:1e4740b610b6 Date: 2013-07-19 12:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/1e4740b610b6/ Log: add another test that passes, not sure why diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -235,6 +235,47 @@ check_not_free(p2) assert classify(p2) == "private" +def test_old_private_from_protected_to_young_private_3(): + p0 = palloc_refs(1) + pw = lib.stm_write_barrier(p0) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "protected" + assert lib.in_nursery(pr) # a young protected + # + minor_collect() + pr = lib.stm_read_barrier(p0) + pw = lib.stm_write_barrier(pr) + lib.setptr(pw, 0, ffi.NULL) + assert classify(pw) == "private_from_protected" + assert not lib.in_nursery(pw) + # + # Because it was protected young before, it has no WRITE_BARRIER + # flag. After transforming it to a PRIV_FROM_PROT, the following + # holds: + # its h_revision is a pointer to the backup copy, and not + # stm_private_rev_num. It means that the write barrier will + # always enter its slow path, even though the GCFLAG_WRITE_BARRIER + # is not set. 
+ assert pw.h_revision != lib.get_private_rev_num() + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + # # + lib.stm_push_root(pw) + minor_collect() + p1 = nalloc(HDR) + pw = lib.stm_pop_root() + lib.setptr(pw, 0, p1) # should trigger the write barrier again + assert classify(pr) == "private_from_protected" + minor_collect() + check_nursery_free(p1) + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "private_from_protected" + p2 = lib.getptr(pr, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + def test_new_version(): p1 = oalloc(HDR) assert lib.stm_write_barrier(p1) == p1 From noreply at buildbot.pypy.org Fri Jul 19 12:58:18 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 2013 12:58:18 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: improve the heuristic and fix some tests Message-ID: <20130719105818.BA1331C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65479:07f3a1f54312 Date: 2013-07-19 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/07f3a1f54312/ Log: improve the heuristic and fix some tests diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -106,7 +106,8 @@ kind='unicode') else: self.malloc_slowpath_unicode = None - self.cond_call_slowpath = self._build_cond_call_slowpath() + self.cond_call_slowpath = [self._build_cond_call_slowpath(False), + self._build_cond_call_slowpath(True)] self._build_stack_check_slowpath() self._build_release_gil(gc_ll_descr.gcrootmap) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -149,18 +149,17 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) - def _build_cond_call_slowpath(self): + def _build_cond_call_slowpath(self, supports_floats): """ This builds a general call slowpath, for whatever call happens to come. 
""" mc = codebuf.MachineCodeBlockWrapper() - self._push_all_regs_to_frame(mc, [], self.cpu.supports_floats, - callee_only=False) + self._push_all_regs_to_frame(mc, [], supports_floats, callee_only=False) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_header_shadowstack(mc, gcrootmap) mc.SUB(esp, imm(WORD)) - # first arg is always in edi + # args are in their respective positions mc.CALL(eax) mc.ADD(esp, imm(WORD)) if gcrootmap and gcrootmap.is_shadow_stack: @@ -2150,7 +2149,11 @@ self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() self.push_gcmap(self.mc, gcmap, store=True) - self.mc.CALL(imm(self.cond_call_slowpath)) + if self._regalloc is not None and self._regalloc.xrm.reg_bindings: + cond_call_adr = self.cond_call_slowpath[1] + else: + cond_call_adr = self.cond_call_slowpath[0] + self.mc.CALL(imm(cond_call_adr)) self.pop_gcmap(self.mc) # never any result value offset = self.mc.get_relative_pos() - jmp_adr diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -86,7 +86,7 @@ OS_JIT_FORCE_VIRTUAL = 120 OS_LIST_RESIZE_GE = 130 - OS_LIST_RESIZE_LE = 130 + OS_LIST_RESIZE_LE = 131 # for debugging: _OS_CANRAISE = set([ diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1638,8 +1638,9 @@ oopspec, [lltype.Ptr(LIST), lltype.Signed], lltype.Void) op1 = SpaceOperation('direct_call', [c_func] + args, op.result) calldescr = self.callcontrol.getcalldescr(op1) + addr = llmemory.cast_ptr_to_adr(c_func.value) extradescrs = [lendescr, itemsdescr, arraydescr, - IntDescr(rffi.cast(lltype.Signed, c_func.value)), + IntDescr(heaptracker.adr2int(addr)), calldescr] return self.handle_residual_call(op, oopspecindex=index, extradescrs=extradescrs) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -49,7 +49,7 @@ class FakeCPU: class tracker: pass - + def __init__(self, rtyper): rtyper._builtin_func_for_spec_cache = FakeDict() self.rtyper = rtyper @@ -71,7 +71,8 @@ callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=None, extraeffect=None, + extradescrs=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -35,7 +35,7 @@ class FakeCPU: class tracker: pass - + rtyper = FakeRTyper() def calldescrof(self, FUNC, ARGS, RESULT): return ('calldescr', FUNC, ARGS, RESULT) @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -77,7 +77,7 @@ class FakeResidualIndirectCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def calldescr_canraise(self, 
calldescr): return True @@ -87,7 +87,7 @@ return 'regular' def graphs_from(self, op): return ['somegraph1', 'somegraph2'] - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def get_jitcode(self, graph, called_from=None): assert graph in ('somegraph1', 'somegraph2') diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -250,7 +250,7 @@ class FakeCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return FakeDescr() def calldescr_canraise(self, calldescr): return True From noreply at buildbot.pypy.org Fri Jul 19 13:54:26 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 19 Jul 2013 13:54:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: attendees, not attendants Message-ID: <20130719115426.8FCD51C02BA@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r4998:bb3112fd409f Date: 2013-07-19 13:54 +0200 http://bitbucket.org/pypy/extradoc/changeset/bb3112fd409f/ Log: attendees, not attendants diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -26,7 +26,7 @@ * STM and STM-related topics -* anything else attendants are interested in +* anything else attendees are interested in ----------- Exact times From noreply at buildbot.pypy.org Fri Jul 19 13:54:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 13:54:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: understand and fix tests Message-ID: <20130719115447.E61DF1C02BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r423:dad61e32d0b3 Date: 2013-07-19 13:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/dad61e32d0b3/ Log: understand and fix tests diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -406,7 +406,7 @@ static void teardown_minor_collect(struct tx_descriptor *d) { - //assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -201,6 +201,7 @@ assert classify(p2) == "private" def test_old_private_from_protected_to_young_private_2(): + py.test.skip("not valid") p0 = nalloc_refs(1) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -245,26 +246,27 @@ assert lib.in_nursery(pr) # a young protected # minor_collect() + # each minor collect adds WRITE_BARRIER to protected/private + # objects it moves out of the nursery pr = lib.stm_read_barrier(p0) + assert pr.h_tid & GCFLAG_WRITE_BARRIER pw = lib.stm_write_barrier(pr) + # added to old_obj_to_trace + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + lib.setptr(pw, 0, ffi.NULL) assert classify(pw) == "private_from_protected" assert not lib.in_nursery(pw) - # - # Because it was protected young before, it has no WRITE_BARRIER - # flag. After transforming it to a PRIV_FROM_PROT, the following - # holds: - # its h_revision is a pointer to the backup copy, and not - # stm_private_rev_num. 
It means that the write barrier will - # always enter its slow path, even though the GCFLAG_WRITE_BARRIER - # is not set. + assert pw.h_revision != lib.get_private_rev_num() assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) # # + lib.stm_push_root(pw) minor_collect() p1 = nalloc(HDR) pw = lib.stm_pop_root() + assert pw.h_tid & GCFLAG_WRITE_BARRIER lib.setptr(pw, 0, p1) # should trigger the write barrier again assert classify(pr) == "private_from_protected" minor_collect() From noreply at buildbot.pypy.org Fri Jul 19 13:57:03 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 19 Jul 2013 13:57:03 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: changed return type of unwrap positive 32 bit int from int to r_uint Message-ID: <20130719115703.E0AEF1C0E1B@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r505:cb9c370c7dae Date: 2013-07-19 11:52 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/cb9c370c7dae/ Log: changed return type of unwrap positive 32 bit int from int to r_uint diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -81,7 +81,8 @@ else: return result except error.PrimitiveFailedError: - print '\t-> failed' + if IProxy.trace_proxy: + print '\t-> failed' IProxy.failed() from rpython.rlib.objectmodel import we_are_translated if not we_are_translated(): @@ -417,7 +418,8 @@ @expose_on_virtual_machine_proxy([oop], int) def positive32BitValueOf(n): - return IProxy.space.unwrap_positive_32bit_int(n) + from rpython.rlib.rarithmetic import intmask + return intmask(IProxy.space.unwrap_positive_32bit_int(n)) # /* InterpreterProxy methodsFor: 'special objects' */ diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -287,9 +287,9 @@ def unwrap_positive_32bit_int(self, w_value): if isinstance(w_value, model.W_SmallInteger): if w_value.value >= 0: - return w_value.value + return r_uint(w_value.value) elif isinstance(w_value, model.W_LargePositiveInteger1Word): - return w_value.value + return r_uint(w_value.value) raise UnwrappingError("Wrong types or negative SmallInteger.") def unwrap_char(self, w_char): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -204,7 +204,7 @@ @expose_primitive(code, unwrap_spec=[pos_32bit_int, pos_32bit_int]) def func(interp, s_frame, receiver, argument): res = op(receiver, argument) - return interp.space.wrap_positive_32bit_int(res) + return interp.space.wrap_positive_32bit_int(rarithmetic.intmask(res)) make_func(op) # #/ -- return the result of a division, only succeed if the division is exact From noreply at buildbot.pypy.org Fri Jul 19 13:57:05 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 19 Jul 2013 13:57:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added caching for dlsym function names to speed up dynamically loaded named primitives Message-ID: <20130719115705.07AF91C0E1B@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r506:231c592b8c33 Date: 2013-07-19 11:53 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/231c592b8c33/ Log: added caching for dlsym function names to speed up dynamically loaded named primitives diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -1031,12 +1031,16 @@ else: module = self.loaded_modules[module_name] - try: - _external_function = 
dlsym(module, function_name) - except KeyError: - raise ProxyFunctionFailed + if function_name in module[1]: + return module[1][function_name] else: - return _external_function + try: + _external_function = dlsym(module[0], function_name) + except KeyError: + raise ProxyFunctionFailed + else: + module[1][function_name] = _external_function + return _external_function def initialize_from_call(self, signature, interp, s_frame, argcount, s_method): @@ -1130,8 +1134,9 @@ print "Failed initialization of: %s" % module_name raise error.PrimitiveFailedError - self.loaded_modules[module_name] = module - return module + module_tuple = (module, {}) + self.loaded_modules[module_name] = module_tuple + return module_tuple except error.PrimitiveFailedError: dlclose(module) raise From noreply at buildbot.pypy.org Fri Jul 19 13:57:06 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Fri, 19 Jul 2013 13:57:06 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added FILL primitive (145) Message-ID: <20130719115706.1E28D1C0E1B@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r507:f48f9c043b3e Date: 2013-07-19 11:53 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/f48f9c043b3e/ Log: added FILL primitive (145) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1028,6 +1028,7 @@ VM_PATH = 142 SHORT_AT = 143 SHORT_AT_PUT = 144 +FILL = 145 CLONE = 148 @expose_primitive(VM_PATH, unwrap_spec=[object]) @@ -1048,6 +1049,20 @@ raise PrimitiveFailedError return w_receiver.short_atput0(interp.space, n0, w_value) + at expose_primitive(FILL, unwrap_spec=[object, pos_32bit_int]) +def func(interp, s_frame, w_arg, new_value): + space = interp.space + if isinstance(w_arg, model.W_BytesObject): + if new_value > 255: + raise PrimitiveFailedError + for i in xrange(w_arg.size()): + w_arg.setchar(i, chr(new_value)) + elif isinstance(w_arg, model.W_PointersObject) or isinstance(w_arg, model.W_DisplayBitmap): + for i in xrange(w_arg.size()): + w_arg.setword(i, new_value) + else: + raise PrimitiveFailedError + return w_arg @expose_primitive(CLONE, unwrap_spec=[object]) def func(interp, s_frame, w_arg): From noreply at buildbot.pypy.org Fri Jul 19 14:35:37 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 14:35:37 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: merge default Message-ID: <20130719123537.BD4251C303C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r424:e9a1be2c8ade Date: 2013-07-19 14:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/e9a1be2c8ade/ Log: merge default diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -72,11 +72,6 @@ time_t default_seed; gcptr shared_roots[SHARED_ROOTS]; -#define CACHE_MASK 65535 -#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *)) -#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \ - + ((revision_t)(obj) & CACHE_MASK))) - struct thread_data { unsigned int thread_seed; gcptr roots[MAXROOTS]; @@ -86,7 +81,6 @@ int steps_left; int interruptible; int atomic; - revision_t writeable[CACHE_ENTRIES]; }; __thread struct thread_data td; @@ -94,7 +88,7 @@ // helper functions int classify(gcptr p); void check(gcptr p); - +int in_nursery(gcptr obj); static int is_private(gcptr P) { return (P->h_revision == stm_private_rev_num) || @@ -171,7 +165,7 @@ return x; } -void push_roots(int with_cache) +void push_roots() { int i; for (i = 0; i < td.num_roots; i++) { @@ -179,30 +173,11 @@ if 
(td.roots[i]) stm_push_root(td.roots[i]); } - - if (with_cache) { - stm_push_root(NULL); - for (i = 0; i < CACHE_ENTRIES; i++) { - if (td.writeable[i]) - stm_push_root((gcptr)td.writeable[i]); - } - } } -void pop_roots(int with_cache) +void pop_roots() { int i; - /* some objects may have changed positions */ - memset(td.writeable, 0, sizeof(td.writeable)); - - if (with_cache) { - gcptr obj = stm_pop_root(); - while (obj) { - CACHE_AT(td.writeable, obj) = obj; - obj = stm_pop_root(); - } - } - for (i = td.num_roots - 1; i >= 0; i--) { if (td.roots[i]) td.roots[i] = stm_pop_root(); @@ -220,9 +195,9 @@ nodeptr allocate_node() { nodeptr r; - push_roots(1); + push_roots(); r = (nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE); - pop_roots(1); + pop_roots(); return r; } @@ -281,8 +256,7 @@ if (p->h_original && !(p->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { // must point to valid old object gcptr id = (gcptr)p->h_original; - assert(id->h_tid & GCFLAG_OLD); - check_not_free(id); + assert(!in_nursery(id)); #ifdef _GC_DEBUG if (!is_shared_prebuilt(id) && !(id->h_tid & GCFLAG_PREBUILT)) assert(!is_free_old(id)); @@ -308,7 +282,6 @@ if (p != NULL) { check(p); w = stm_write_barrier(p); - CACHE_AT(td.writeable, w) = w; check(w); assert(is_private(w)); } @@ -425,22 +398,22 @@ { int k = get_rand(100); if (k < 10) { - push_roots(1); + push_roots(); stm_push_root(p); stm_become_inevitable("fun"); p = stm_pop_root(); - pop_roots(1); + pop_roots(); } else if (k < 40) { - push_roots(1); + push_roots(); stmgc_minor_collect(); - pop_roots(1); + pop_roots(); p = NULL; } else if (k < 41 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); - push_roots(1); + push_roots(); stmgcpage_possibly_major_collect(1); - pop_roots(1); + pop_roots(); p = NULL; } return p; @@ -479,10 +452,7 @@ break; case 7: // set 'p' as *next in one of the roots check(_r); - if (CACHE_AT(td.writeable, _r) == _r) - w_r = (nodeptr)_r; - else - w_r = (nodeptr)write_barrier(_r); + w_r = (nodeptr)write_barrier(_r); check((gcptr)w_r); check(p); w_r->next = (struct node*)p; @@ -582,10 +552,7 @@ assert(w_t->id == stm_id((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->id = stm_id((gcptr)w_t); assert(w_t->id == stm_id((gcptr)_t)); } @@ -601,10 +568,7 @@ assert(w_t->hash == stm_hash((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } @@ -656,7 +620,7 @@ void transaction_break() { - push_roots(0); + push_roots(); td.interruptible = 1; copy_roots(td.roots, td.roots_outside_perform, td.num_roots); @@ -668,9 +632,7 @@ copy_roots(td.roots_outside_perform, td.roots, td.num_roots); td.interruptible = 0; - pop_roots(0); - - /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */ + pop_roots(); } @@ -685,8 +647,8 @@ assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); arg1 = stm_pop_root(); assert(arg1 == NULL); - pop_roots(0); - push_roots(0); + pop_roots(); + push_roots(); stm_push_root(arg1); stm_push_root(end_marker); @@ -702,9 +664,6 @@ { gcptr p = NULL; - // clear cache of writeables: - memset(&td.writeable, 0, sizeof(td.writeable)); - while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1115,7 +1115,7 @@ 
#endif L->h_revision = new_revision; - gcptr stub = stm_stub_malloc(d->public_descriptor); + gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -107,10 +107,12 @@ else { /* must create shadow original object XXX: or use backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); + gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); + memcpy(O, p, stmgc_size(p)); /* at least major collections + depend on some content of id_copy. + remove after fixing that XXX */ + O->h_tid |= GCFLAG_OLD; + p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -133,9 +133,6 @@ } /************************************************************/ -/* list for private/protected, old roots that need to be - kept in old_objects_to_trace */ -static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL}; static inline gcptr create_old_object_copy(gcptr obj) { @@ -215,22 +212,6 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); - item = *end; - /* if private or protected, this object needs to be - traced again in the next minor_collect if it is - currently in old_objects_to_trace. Because then - it may be seen as write-ready in the view of - someone: - pw = write_barrier(); push_root(pw); - minor_collect(); pw = pop_root(); // pw still write-ready - */ - if (item - && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in - obj_to_trace*/ - && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED - || item->h_revision == stm_private_rev_num)) { - gcptrlist_insert(&private_or_protected_roots, item); - } } else if (item != NULL) { if (item == END_MARKER_OFF) @@ -385,19 +366,6 @@ stmgc_trace(obj, &visit_if_young); } - - while (gcptrlist_size(&private_or_protected_roots) > 0) { - gcptr obj = gcptrlist_pop(&private_or_protected_roots); - /* if it has the write_barrier flag, clear it so that - it doesn't get inserted twice by a later write-barrier */ - if (obj->h_tid & GCFLAG_WRITE_BARRIER) { - /* only insert those that were in old_obj_to_trace - and that we didn't insert already */ - obj->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&d->old_objects_to_trace, obj); - dprintf(("re-add %p to old_objects_to_trace\n", obj)); - } - } } static void fix_list_of_read_objects(struct tx_descriptor *d) @@ -446,7 +414,7 @@ static void teardown_minor_collect(struct tx_descriptor *d) { - //assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -1,11 +1,13 @@ #include "stmimpl.h" -gcptr stm_stub_malloc(struct tx_public_descriptor *pd) +gcptr stm_stub_malloc(struct tx_public_descriptor *pd, size_t minsize) { assert(pd->collection_lock != 0); + if (minsize < sizeof(struct stm_stub_s)) + minsize = sizeof(struct stm_stub_s); - gcptr p = stmgcpage_malloc(sizeof(struct stm_stub_s)); + gcptr p = stmgcpage_malloc(minsize); STUB_THREAD(p) = pd; return p; } @@ -85,8 +87,20 @@ assert(stub->h_revision == (((revision_t)obj) | 2)); goto done; - not_found: - stub = 
stm_stub_malloc(sd->foreign_pd); + not_found:; + size_t size = 0; + if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) { + /* There shouldn't be a public, young object without + a h_original. But there can be priv/protected ones. + We have a young protected copy without an h_original + The stub we allocate will be the h_original, but + it must be big enough to be copied over by a major + collection later. */ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + + size = stmgc_size(obj); + } + stub = stm_stub_malloc(sd->foreign_pd, size); stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; @@ -98,10 +112,9 @@ stub->h_original = (revision_t)obj; } else { - /* There shouldn't be a public, young object without - a h_original. But there can be protected ones. */ - assert(!(obj->h_tid & GCFLAG_PUBLIC)); - obj->h_original = (revision_t)stub; + /* this is the big-stub case described above */ + obj->h_original = (revision_t)stub; + stub->h_original = 0; /* stub_malloc does not set to 0... */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { ((gcptr)obj->h_revision)->h_original = (revision_t)stub; } diff --git a/c4/steal.h b/c4/steal.h --- a/c4/steal.h +++ b/c4/steal.h @@ -9,7 +9,7 @@ #define STUB_THREAD(h) (((struct stm_stub_s *)(h))->s_thread) -gcptr stm_stub_malloc(struct tx_public_descriptor *); +gcptr stm_stub_malloc(struct tx_public_descriptor *, size_t minsize); void stm_steal_stub(gcptr); gcptr stm_get_stolen_obj(long index); /* debugging */ void stm_normalize_stolen_objects(struct tx_descriptor *); diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -42,8 +42,10 @@ _Bool stm_pointer_equal(gcptr, gcptr); /* to push/pop objects into the local shadowstack */ -static inline void stm_push_root(gcptr); -static inline gcptr stm_pop_root(void); +#if 0 // (optimized version below) +void stm_push_root(gcptr); +gcptr stm_pop_root(void); +#endif /* initialize/deinitialize the stm framework in the current thread */ void stm_initialize(void); @@ -55,15 +57,25 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now) */ +/* read/write barriers (the most general versions only for now). + + - the read barrier must be applied before reading from an object. + the result is valid as long as we're in the same transaction, + and stm_write_barrier() is not called on the same object. + + - the write barrier must be applied before writing to an object. + the result is valid for a shorter period of time: we have to + do stm_write_barrier() again if we ended the transaction, or + if we did a potential collection (e.g. stm_allocate()). +*/ static inline gcptr stm_read_barrier(gcptr); static inline gcptr stm_write_barrier(gcptr); /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' provided, and with a retry_counter number. Must save roots around - this call. If callback() returns a value > 0, it is called - again. */ + this call. The callback() is called repeatedly as long as it + returns a value > 0. 
*/ void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int)); /* finish the current transaction, start a new one, or turn the current diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -205,6 +205,8 @@ assert list_of_read_objects() == [p2] def test_write_barrier_after_minor_collect(): + # should fail + py.test.skip("should fail now") p = oalloc_refs(1) pw = lib.stm_write_barrier(p) @@ -220,8 +222,10 @@ assert pw.h_tid & GCFLAG_OLD rawsetptr(pw, 0, r) - # pw needs to be readded to old_objects_to_trace - # before the next minor gc in order for this test to pass + # pw not in old_objects_to_trace. A + # repeated write_barrier before + # rawsetptr() would fix that + lib.stm_push_root(r) minor_collect() minor_collect() @@ -232,6 +236,10 @@ pr = lib.stm_read_barrier(p) assert r != r2 + # these will fail because pw/pr was + # not traced in the last minor_collect, + # because they were not registered in + # old_objects_to_trace. assert getptr(pr, 0) != r assert getptr(pr, 0) == r2 @@ -251,6 +259,7 @@ assert getptr(pr, 0) != q2 def test_write_barrier_after_minor_collect_young_to_old(): + py.test.skip("should fail now") p = nalloc_refs(1) pw = lib.stm_write_barrier(p) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -200,6 +200,84 @@ check_not_free(p2) assert classify(p2) == "private" +def test_old_private_from_protected_to_young_private_2(): + py.test.skip("not valid") + p0 = nalloc_refs(1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + lib.setptr(p0, 0, ffi.NULL) + assert classify(p0) == "private_from_protected" + assert lib.in_nursery(p0) # a young private_from_protected + # + lib.stm_push_root(p0) + minor_collect() + p0 = lib.stm_pop_root() + assert classify(p0) == "private_from_protected" + assert not lib.in_nursery(p0) # becomes an old private_from_protected + # + # Because it's a private_from_protected, its h_revision is a pointer + # to the backup copy, and not stm_private_rev_num. It means that the + # write barrier will always enter its slow path, even though the + # GCFLAG_WRITE_BARRIER is not set. 
+ assert p0.h_revision != lib.get_private_rev_num() + assert not (p0.h_tid & GCFLAG_WRITE_BARRIER) + # + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) # should trigger the write barrier again + assert classify(p0) == "private_from_protected" + lib.stm_push_root(p0) + minor_collect() + p0b = lib.stm_pop_root() + assert p0b == p0 + check_nursery_free(p1) + assert classify(p0) == "private_from_protected" + p2 = lib.getptr(p0, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + +def test_old_private_from_protected_to_young_private_3(): + p0 = palloc_refs(1) + pw = lib.stm_write_barrier(p0) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "protected" + assert lib.in_nursery(pr) # a young protected + # + minor_collect() + # each minor collect adds WRITE_BARRIER to protected/private + # objects it moves out of the nursery + pr = lib.stm_read_barrier(p0) + assert pr.h_tid & GCFLAG_WRITE_BARRIER + pw = lib.stm_write_barrier(pr) + # added to old_obj_to_trace + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + + lib.setptr(pw, 0, ffi.NULL) + assert classify(pw) == "private_from_protected" + assert not lib.in_nursery(pw) + + assert pw.h_revision != lib.get_private_rev_num() + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + # # + + lib.stm_push_root(pw) + minor_collect() + p1 = nalloc(HDR) + pw = lib.stm_pop_root() + assert pw.h_tid & GCFLAG_WRITE_BARRIER + lib.setptr(pw, 0, p1) # should trigger the write barrier again + assert classify(pr) == "private_from_protected" + minor_collect() + check_nursery_free(p1) + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "private_from_protected" + p2 = lib.getptr(pr, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + def test_new_version(): p1 = oalloc(HDR) assert lib.stm_write_barrier(p1) == p1 diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_write1(ob); + _du_read1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,6 +85,8 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); + _du_write1(ob); + for (i=0; iob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; From noreply at buildbot.pypy.org Fri Jul 19 14:35:38 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 14:35:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: a bit of documentation Message-ID: <20130719123538.C8EB61C303C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r425:6e3e8f9d5e30 Date: 2013-07-19 14:35 +0200 http://bitbucket.org/pypy/stmgc/changeset/6e3e8f9d5e30/ Log: a bit of documentation diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -293,3 +293,63 @@ The backup copy of a GCFLAG_PRIVATE_FROM_PROTECTED copy is allocated old, and explicitly freed when the thread commits (unless it was stolen). + + + +The role of GCFLAG_WRITE_BARRIER +-------------------------------- + +This flag's purpose is to mark old objects that, if written to again, +should be traced because they may contain young pointers again. So +for example: +| p = old protected obj with WRITE_BARRIER +| pw = stm_write_barrier(p) +| pw->field = young private obj +The WB-flag tells a write_barrier that it must add the object to the +list `old_objects_to_trace`. 
This flag gets added automatically in +every minor collection to all private & protected objects that are +moved out of the nursery. +On public objects, this flag means nothing and can be ignored. + + + +The implementation of ID and HASH +--------------------------------- + +The ID of an object must be unique during its lifetime. It is the same +for all copies or versions of an object. +The HASH is an identity hash, basically a hash of the ID of an object. +On prebuilt objects, one can define a value that should be returned +for a certain object (useful for objects that should have the same +HASH during compilation/translation and during runtime). + +The ID is based on the address of the object. Since objects can move +if they are young, and since they can have multiple copies, a special +non-moving location has to be defined in order for it to not change +during the lifetime of an object. For that reason, calling `stm_id` +on an object does the following: + +| if object is young: +| create a shadow copy in the non-moving space that +| is later used when moving the object out of the nursery +| else: +| the copy is non-moving and therefore the ID is its address +| OR: we already have an ID for that object + +To maintain a unique ID over all copies, every object has a `h_original` +field in its header. The point of this field is to point to a copy +of the object that is non-moving and chosen to be the *original copy*. +On the *original copy* itself, the field is `NULL`. + +(I) If we have a young object, its `h_original` is `NULL` iff there exists +**no old copy** of the same object. Otherwise it must point to it. + +(II) If we have an old object, its `h_original` is `NULL` iff it is the +*original copy*. + +These invariants must be upheld all the time. There is one single +exception to (II): Prebuilt objects (`GCFLAG_PREBUILT_ORIGINAL`), +are always their *original copy* and if `h_original` is not `NULL`, +it is a predefined HASH value for this object. This is used by +`stm_hash` which otherwise returns a hashed version of the ID of +the object. 
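A minimal C sketch of the write-barrier discipline described above (illustrative only, not code from this repository). It assumes a simplified 'struct node' and the GCTID_STRUCT_NODE tid as in c4/demo_random.c, and otherwise uses only the public stmgc.h API (stm_push_root, stm_pop_root, stm_allocate, stm_write_barrier):

    #include "stmgc.h"

    #define GCTID_STRUCT_NODE 123       /* as in demo_random.c */

    struct node {                       /* simplified node layout */
        struct stm_object_s hdr;
        struct node *next;
    };

    /* store a freshly allocated node into 'r->next' */
    void store_new_node(gcptr r)
    {
        /* stm_allocate() may run a minor collection, so 'r' has to be
           saved on the shadowstack across the call */
        stm_push_root(r);
        gcptr fresh = stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE);
        r = stm_pop_root();

        /* redo the write barrier *after* the potential collection: a
           minor collect may have moved 'r' out of the nursery and set
           GCFLAG_WRITE_BARRIER on it again, so an earlier
           stm_write_barrier() result must not be reused here */
        struct node *w = (struct node *)stm_write_barrier(r);
        w->next = (struct node *)fresh;
    }

This is exactly the pattern that allocate_node() plus simple_events() follow in demo_random.c once the write-barrier cache is removed.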
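The invariants (I) and (II) can also be read as a debug check. The helper below is hypothetical (it does not exist in this repository) and relies only on the h_original field and the GCFLAG_PREBUILT_ORIGINAL flag described above:

    #include <assert.h>
    #include "stmimpl.h"

    /* hypothetical debug helper restating the *original copy* invariants */
    void check_original_copy_invariants(gcptr obj)
    {
        if (obj->h_original == 0)
            return;   /* (II): this copy is itself the original, or
                         (I): a young object with no old copy yet */

        if (obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)
            return;   /* exception to (II): h_original holds a predefined
                         hash value here, not a pointer */

        /* otherwise h_original points to the non-moving original copy,
           and on that copy the field is NULL again, unless that copy is
           a prebuilt original carrying a predefined hash */
        gcptr original = (gcptr)obj->h_original;
        if (!(original->h_tid & GCFLAG_PREBUILT_ORIGINAL))
            assert(original->h_original == 0);
    }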
From noreply at buildbot.pypy.org Fri Jul 19 15:43:54 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 2013 15:43:54 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fixes Message-ID: <20130719134354.935661C14B6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65480:8dd3aec517e0 Date: 2013-07-19 15:43 +0200 http://bitbucket.org/pypy/pypy/changeset/8dd3aec517e0/ Log: fixes diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -78,7 +78,7 @@ def _candidate(self, node): return (node.val & 1 == 0) and (node.val + 1 == node.next.val) - + def _pop_two(self, tp): node = self.master_node if node is None or node.next is None: @@ -281,6 +281,7 @@ def __init__(self, longevity, frame_manager=None, assembler=None): self.free_regs = self.all_regs[:] + self.free_regs.reverse() self.longevity = longevity self.temp_boxes = [] if not we_are_translated(): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -248,7 +248,7 @@ descr=arraydescr) op2 = ResOperation(rop.INT_LT, [arrlenbox, newsizebox], cond_box) op3 = ResOperation(rop.COND_CALL, [cond_box, resize_ptr, lst, - newsizebox], + newsizebox, ConstInt(1)], None, descr=calldescr) op4 = ResOperation(rop.SETFIELD_GC, [lst, newsizebox], None, descr=lendescr) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -802,6 +802,6 @@ p1 = getfield_gc(p0, descr=itemsdescr) i1 = arraylen_gc(p1, descr=arraydescr) i2 = int_lt(i1, i0) - cond_call(i2, 123, p0, i0, descr=list_resize_descr) + cond_call(i2, 123, p0, i0, 1, descr=list_resize_descr) setfield_gc(p0, i0, descr=lendescr) """) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -159,9 +159,12 @@ if gcrootmap and gcrootmap.is_shadow_stack: self._call_header_shadowstack(mc, gcrootmap) mc.SUB(esp, imm(WORD)) + self.set_extra_stack_depth(mc, 2 * WORD) # args are in their respective positions mc.CALL(eax) mc.ADD(esp, imm(WORD)) + self.set_extra_stack_depth(mc, 0) + self._reload_frame_if_necessary(mc, align_stack=True) if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(mc, gcrootmap) self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, @@ -2144,7 +2147,7 @@ def label(self): self._check_frame_depth_debug(self.mc) - def cond_call(self, op, gcmap, cond_loc, call_loc, arglocs): + def cond_call(self, op, gcmap, cond_loc, call_loc): self.mc.TEST(cond_loc, cond_loc) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -802,15 +802,20 @@ assert op.result is None args = op.getarglist() assert 2 <= len(args) <= 4 + 2 - loc_call = self.make_sure_var_in_reg(args[1], [], selected_reg=eax) - args_so_far = [args[1]] + tmpbox = TempBox() + self.rm.force_allocate_reg(tmpbox, selected_reg=eax) + v = args[1] + assert isinstance(v, Const) + imm = 
self.rm.convert_to_imm(v) + self.assembler.regalloc_mov(imm, eax) + args_so_far = [tmpbox] for i in range(2, len(args)): reg = self.rm.register_arguments[i - 2] self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) args_so_far.append(args[i]) loc_cond = self.make_sure_var_in_reg(args[0], args) - self.assembler.cond_call(op, self.get_gcmap(), loc_cond, loc_call, - [edi]) + self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) + self.rm.possibly_free_var(tmpbox) def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1635,8 +1635,11 @@ arraydescr = self.cpu.arraydescrof(LIST.items.TO) oopspec = "list.resize_hint_really" c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper, - oopspec, [lltype.Ptr(LIST), lltype.Signed], lltype.Void) - op1 = SpaceOperation('direct_call', [c_func] + args, op.result) + oopspec, [lltype.Ptr(LIST), lltype.Signed, lltype.Bool], + lltype.Void) + op1 = SpaceOperation('direct_call', [c_func] + args + + [Constant(1, concretetype=lltype.Bool)], + op.result) calldescr = self.callcontrol.getcalldescr(op1) addr = llmemory.cast_ptr_to_adr(c_func.value) extradescrs = [lendescr, itemsdescr, arraydescr, diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -213,8 +213,7 @@ _ll_5_list_ll_arraycopy = rgc.ll_arraycopy -def _ll_2_list_resize_hint_really(l, newsize): - rlist_ll._ll_list_resize_hint_really(l, newsize, True) +_ll_3_list_resize_hint_really = rlist_ll._ll_list_resize_hint_really @elidable def _ll_1_gc_identityhash(x): From noreply at buildbot.pypy.org Fri Jul 19 15:46:31 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 15:46:31 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: magically make it work Message-ID: <20130719134631.58C2E1C14B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: weakref Changeset: r426:4cad3aa5a20b Date: 2013-07-19 15:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/4cad3aa5a20b/ Log: magically make it work (we shouldn't run over list_of_read_objects after a major collection decided to partially fix it (or not at all)) diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -205,10 +205,10 @@ weaknodeptr allocate_weaknodeptr(nodeptr to) { weaknodeptr w; - push_roots(1); + push_roots(); w = (weaknodeptr)stm_weakref_allocate(WEAKNODE_SIZE, GCTID_WEAKREF, (gcptr)to); - pop_roots(1); + pop_roots(); return w; } @@ -490,7 +490,6 @@ assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF); if (ww->node) { check((gcptr)ww->node); - return (gcptr)ww->node; } else { t->weakref = NULL; diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -3,7 +3,7 @@ void stm_copy_to_old_id_copy(gcptr obj, gcptr id) { - //assert(!is_in_nursery(thread_descriptor, id)); + //assert(!stmgc_is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); size_t size = stmgc_size(obj); diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -461,11 +461,11 @@ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); /* NOT NECESSARILY EMPTY: - 
list_of_read_objects - private_from_protected - public_to_private - - old_objects_to_trace */ assert(gcptrlist_size(&d->list_of_read_objects) == d->num_read_objects_known_old); @@ -497,8 +497,15 @@ /* If we're aborting this transaction anyway, we don't need to do * more here. */ - if (d->active < 0) - return; /* already "aborted" during forced minor collection */ + if (d->active < 0) { + /* already "aborted" during forced minor collection + clear list of read objects so that a possible minor collection + before the abort doesn't trip + fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; + return; + } if (d->active == 2) { /* inevitable transaction: clear the list of read objects */ @@ -527,6 +534,9 @@ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR); + /* fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; return; } diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -1,7 +1,6 @@ #include "stmimpl.h" - -static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj) { return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); } @@ -155,7 +154,7 @@ gcptr fresh_old_copy; struct tx_descriptor *d = thread_descriptor; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* not a nursery object */ } else { @@ -382,7 +381,7 @@ for (i = d->list_of_read_objects.size - 1; i >= limit; --i) { gcptr obj = items[i]; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* non-young or visited young objects are kept */ continue; } diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -67,5 +67,6 @@ size_t stmgc_size(gcptr); void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj); #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -134,6 +134,7 @@ #define GCFLAG_STUB ... #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define GCFLAG_HAS_ID ... + #define GCFLAG_IMMUTABLE ... #define ABRT_MANUAL ... 
typedef struct { ...; } page_header_t; ''') diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py --- a/c4/test/test_weakref.py +++ b/c4/test/test_weakref.py @@ -15,7 +15,7 @@ def test_weakref_invalidate(self): p2 = nalloc(HDR) p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE assert p1.h_revision == lib.get_private_rev_num() assert lib.rawgetptr(p1, 0) == p2 lib.stm_push_root(p1) @@ -31,7 +31,7 @@ def test_weakref_keep(self): p2 = nalloc(HDR) p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE assert p1.h_revision == lib.get_private_rev_num() assert lib.rawgetptr(p1, 0) == p2 lib.stm_push_root(p1) @@ -44,7 +44,7 @@ def test_weakref_old_keep(self): p2 = oalloc(HDR) p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) - assert p1.h_tid == WEAKREF_TID # no GC flags + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE assert p1.h_revision == lib.get_private_rev_num() assert lib.rawgetptr(p1, 0) == p2 lib.stm_push_root(p1) diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -18,10 +18,6 @@ /***** Minor collection *****/ -static int is_in_nursery(struct tx_descriptor *d, gcptr obj) -{ - return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); -} void stm_move_young_weakrefs(struct tx_descriptor *d) { @@ -39,7 +35,7 @@ gcptr pointing_to = WEAKREF_PTR(weakref, size); assert(pointing_to != NULL); - if (is_in_nursery(d, pointing_to)) { + if (stmgc_is_in_nursery(d, pointing_to)) { if (pointing_to->h_tid & GCFLAG_NURSERY_MOVED) { dprintf(("weakref ptr moved %p->%p\n", WEAKREF_PTR(weakref, size), From noreply at buildbot.pypy.org Fri Jul 19 16:06:25 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 19 Jul 2013 16:06:25 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add myself Message-ID: <20130719140625.8B36B1C303C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r4999:08c9b80420a4 Date: 2013-07-19 16:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/08c9b80420a4/ Log: Add myself diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -11,6 +11,7 @@ Name Arrive/Depart Accomodation ==================== ============== ======================= Carl Friedrich Bolz ? ? +Romain Guillebert ? ? 
==================== ============== ======================= From noreply at buildbot.pypy.org Fri Jul 19 16:16:39 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 19 Jul 2013 16:16:39 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: merge weakrefs Message-ID: <20130719141639.1D29E1C0130@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: copy-over-original2 Changeset: r427:b39faaf63e68 Date: 2013-07-19 16:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/b39faaf63e68/ Log: merge weakrefs diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -16,10 +16,10 @@ H_FILES = atomic_ops.h stmgc.h stmimpl.h \ et.h lists.h steal.h nursery.h gcpage.h \ - stmsync.h extra.h dbgmem.h fprintcolor.h + stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ - stmsync.c extra.c dbgmem.c fprintcolor.c + stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -25,27 +25,46 @@ // SUPPORT #define GCTID_STRUCT_NODE 123 +#define GCTID_WEAKREF 122 + +struct node; +typedef struct node * nodeptr; +struct weak_node { + struct stm_object_s hdr; + nodeptr node; +}; +typedef struct weak_node * weaknodeptr; +#define WEAKNODE_SIZE sizeof(struct weak_node) struct node { struct stm_object_s hdr; long value; revision_t id; revision_t hash; - struct node *next; + nodeptr next; + weaknodeptr weakref; }; -typedef struct node * nodeptr; + + size_t stmcb_size(gcptr ob) { - assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); - return sizeof(struct node); + if (stm_get_tid(ob) == GCTID_STRUCT_NODE) + return sizeof(struct node); + else if (stm_get_tid(ob) == GCTID_WEAKREF) + return WEAKNODE_SIZE; + assert(0); } + void stmcb_trace(gcptr ob, void visit(gcptr *)) { nodeptr n; + if (stm_get_tid(ob) == GCTID_WEAKREF) + return; assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); n = (nodeptr)ob; visit((gcptr *)&n->next); + visit((gcptr *)&n->weakref); } @@ -53,11 +72,6 @@ time_t default_seed; gcptr shared_roots[SHARED_ROOTS]; -#define CACHE_MASK 65535 -#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *)) -#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache) \ - + ((revision_t)(obj) & CACHE_MASK))) - struct thread_data { unsigned int thread_seed; gcptr roots[MAXROOTS]; @@ -67,7 +81,6 @@ int steps_left; int interruptible; int atomic; - revision_t writeable[CACHE_ENTRIES]; }; __thread struct thread_data td; @@ -105,6 +118,21 @@ return (int)(rand_r(&td.thread_seed) % (unsigned int)max); } +gcptr get_random_root() +{ + int num = get_rand(td.num_roots + 1); + if (num == 0) + return stm_thread_local_obj; + else + return td.roots[num - 1]; +} + +gcptr get_random_shared_root() +{ + int num = get_rand(SHARED_ROOTS); + return shared_roots[num]; +} + void copy_roots(gcptr *from, gcptr *to, int num) { int i; @@ -137,7 +165,7 @@ return x; } -void push_roots(int with_cache) +void push_roots() { int i; for (i = 0; i < td.num_roots; i++) { @@ -145,30 +173,11 @@ if (td.roots[i]) stm_push_root(td.roots[i]); } - - if (with_cache) { - stm_push_root(NULL); - for (i = 0; i < CACHE_ENTRIES; i++) { - if (td.writeable[i]) - stm_push_root((gcptr)td.writeable[i]); - } - } } -void pop_roots(int with_cache) +void pop_roots() { int i; - /* some objects may have changed positions */ - memset(td.writeable, 0, sizeof(td.writeable)); - - if (with_cache) { - gcptr obj = stm_pop_root(); - while (obj) { - 
CACHE_AT(td.writeable, obj) = obj; - obj = stm_pop_root(); - } - } - for (i = td.num_roots - 1; i >= 0; i--) { if (td.roots[i]) td.roots[i] = stm_pop_root(); @@ -186,12 +195,33 @@ nodeptr allocate_node() { nodeptr r; - push_roots(1); + push_roots(); r = (nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE); - pop_roots(1); + pop_roots(); return r; } + +weaknodeptr allocate_weaknodeptr(nodeptr to) +{ + weaknodeptr w; + push_roots(); + w = (weaknodeptr)stm_weakref_allocate(WEAKNODE_SIZE, GCTID_WEAKREF, + (gcptr)to); + pop_roots(); + return w; +} + +void set_weakref(nodeptr n, nodeptr to) +{ + stm_push_root((gcptr)n); + weaknodeptr w = allocate_weaknodeptr(to); + n = (nodeptr)stm_pop_root(); + n = (nodeptr)stm_write_barrier((gcptr)n); + n->weakref = w; + dprintf(("set_weakref %p -> %p -> %p\n", n, w, to)); +} + int is_shared_prebuilt(gcptr p) { int i; @@ -252,7 +282,6 @@ if (p != NULL) { check(p); w = stm_write_barrier(p); - CACHE_AT(td.writeable, w) = w; check(w); assert(is_private(w)); } @@ -369,22 +398,22 @@ { int k = get_rand(100); if (k < 10) { - push_roots(1); + push_roots(); stm_push_root(p); stm_become_inevitable("fun"); p = stm_pop_root(); - pop_roots(1); + pop_roots(); } else if (k < 40) { - push_roots(1); + push_roots(); stmgc_minor_collect(); - pop_roots(1); + pop_roots(); p = NULL; } else if (k < 41 && DO_MAJOR_COLLECTS) { fprintf(stdout, "major collect\n"); - push_roots(1); + push_roots(); stmgcpage_possibly_major_collect(1); - pop_roots(1); + pop_roots(); p = NULL; } return p; @@ -423,10 +452,7 @@ break; case 7: // set 'p' as *next in one of the roots check(_r); - if (CACHE_AT(td.writeable, _r) == _r) - w_r = (nodeptr)_r; - else - w_r = (nodeptr)write_barrier(_r); + w_r = (nodeptr)write_barrier(_r); check((gcptr)w_r); check(p); w_r->next = (struct node*)p; @@ -447,6 +473,46 @@ return p; } +gcptr weakref_events(gcptr p, gcptr _r, gcptr _sr) +{ + nodeptr t; + weaknodeptr w, ww; + gcptr ptrs[] = {_r, _sr}; + + int i = get_rand(2); + int k = get_rand(3); + switch (k) { + case 0: // check weakref + t = (nodeptr)read_barrier(ptrs[i]); + w = t->weakref; + if(w) { + ww = (weaknodeptr)stm_read_barrier((gcptr)w); + assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF); + if (ww->node) { + check((gcptr)ww->node); + } + else { + t->weakref = NULL; + } + } + p = NULL; + break; + case 1: // set weakref to something + if (p) + set_weakref((nodeptr)_r, (nodeptr)p); + else + set_weakref((nodeptr)_r, (nodeptr)get_random_root()); + p = NULL; + break; + case 2: // set weakref on shared roots + set_weakref((nodeptr)_sr, (nodeptr)get_random_shared_root()); + p = NULL; + break; + } + return p; +} + + gcptr shared_roots_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr w_sr; @@ -461,7 +527,7 @@ break; case 2: w_sr = (nodeptr)write_barrier(_sr); - w_sr->next = (nodeptr)shared_roots[get_rand(SHARED_ROOTS)]; + w_sr->next = (nodeptr)get_random_shared_root(); break; } return p; @@ -485,10 +551,7 @@ assert(w_t->id == stm_id((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->id = stm_id((gcptr)w_t); assert(w_t->id == stm_id((gcptr)_t)); } @@ -504,10 +567,7 @@ assert(w_t->hash == stm_hash((gcptr)_t)); } else { - if (CACHE_AT(td.writeable, _t) == _t) - w_t = (nodeptr)_t; - else - w_t = (nodeptr)write_barrier(_t); + w_t = (nodeptr)write_barrier(_t); w_t->hash = stm_hash((gcptr)w_t); assert(w_t->hash == stm_hash((gcptr)_t)); } @@ -526,18 +586,12 @@ gcptr do_step(gcptr p) { gcptr _r, _sr; - int num, 
k; + int k; - num = get_rand(td.num_roots+1); - if (num == 0) - _r = stm_thread_local_obj; - else - _r = td.roots[num - 1]; - - num = get_rand(SHARED_ROOTS); - _sr = shared_roots[num]; + _r = get_random_root(); + _sr = get_random_shared_root(); - k = get_rand(9); + k = get_rand(11); check(p); assert(thread_descriptor->active); @@ -549,6 +603,8 @@ p = id_hash_events(p, _r, _sr); else if (k < 8) p = rare_events(p, _r, _sr); + else if (k < 10) + p = weakref_events(p, _r, _sr); else if (get_rand(20) == 1) { // transaction break fprintf(stdout, "|"); @@ -563,7 +619,7 @@ void transaction_break() { - push_roots(0); + push_roots(); td.interruptible = 1; copy_roots(td.roots, td.roots_outside_perform, td.num_roots); @@ -575,9 +631,7 @@ copy_roots(td.roots_outside_perform, td.roots, td.num_roots); td.interruptible = 0; - pop_roots(0); - - /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */ + pop_roots(); } @@ -592,8 +646,8 @@ assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF); arg1 = stm_pop_root(); assert(arg1 == NULL); - pop_roots(0); - push_roots(0); + pop_roots(); + push_roots(); stm_push_root(arg1); stm_push_root(end_marker); @@ -609,9 +663,6 @@ { gcptr p = NULL; - // clear cache of writeables: - memset(&td.writeable, 0, sizeof(td.writeable)); - while (td.steps_left-->0 || td.atomic) { if (td.steps_left % 8 == 0) fprintf(stdout, "#"); diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -6,6 +6,29 @@ */ #include "stmimpl.h" +#ifdef _GC_DEBUG +char tmp_buf[128]; +char* stm_dbg_get_hdr_str(gcptr obj) +{ + char *cur; + char *flags[] = GC_FLAG_NAMES; + int i; + + i = 0; + cur = tmp_buf; + cur += sprintf(cur, "%p:", obj); + while (flags[i]) { + if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { + cur += sprintf(cur, "%s|", flags[i]); + } + i++; + } + cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); + return tmp_buf; +} +#endif + + __thread struct tx_descriptor *thread_descriptor = NULL; @@ -545,6 +568,7 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -72,6 +72,8 @@ static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; +static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; + /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -89,6 +91,8 @@ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ + "HAS_ID", \ + "IMMUTABLE", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ @@ -196,4 +200,7 @@ void DescriptorInit(void); void DescriptorDone(void); +#ifdef _GC_DEBUG +char* stm_dbg_get_hdr_str(gcptr obj); +#endif #endif /* _ET_H */ diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -3,7 +3,7 @@ void stm_copy_to_old_id_copy(gcptr obj, gcptr id) { - //assert(!is_in_nursery(thread_descriptor, id)); + //assert(!stmgc_is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); size_t size = stmgc_size(obj); diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -222,12 +222,14 @@ if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - id_copy->h_tid |= GCFLAG_VISITED; - assert(!(id_copy->h_tid & GCFLAG_MOVED)); + if 
(!(id_copy->h_tid & GCFLAG_VISITED)) { + id_copy->h_tid |= GCFLAG_VISITED; + assert(!(id_copy->h_tid & GCFLAG_MOVED)); - /* XXX: may not always need tracing? */ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); + /* XXX: may not always need tracing? */ + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); + } } else { /* prebuilt originals won't get collected anyway @@ -237,6 +239,14 @@ } } +static void visit(gcptr *pobj); + +gcptr stmgcpage_visit(gcptr obj) +{ + visit(&obj); + return obj; +} + static gcptr copy_over_original(gcptr obj) { assert(!(obj->h_tid & GCFLAG_VISITED)); @@ -346,10 +356,10 @@ keep_original_alive(prev_obj); assert(*pobj == prev_obj); - gcptr obj1 = obj; - visit(&obj1); /* recursion, but should be only once */ + /* recursion, but should be only once */ + obj = stmgcpage_visit(obj); assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj1) | 2; + prev_obj->h_revision = ((revision_t)obj) | 2; return; } } @@ -481,8 +491,6 @@ static void mark_all_stack_roots(void) { - int i; - gcptr *items; struct tx_descriptor *d; struct G2L new_public_to_private; memset(&new_public_to_private, 0, sizeof(struct G2L)); @@ -493,15 +501,6 @@ /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); - /* some roots (^^^) can also be in this list, and - we may have a stolen priv_from_prot in here that, - when visited, resolves to its backup (or further) */ - items = d->old_objects_to_trace.items; - for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { - visit(&items[i]); - gcptrlist_insert(&objects_to_trace, items[i]); - } - /* the thread-local object */ visit(d->thread_local_obj_ref); visit(&d->old_thread_local_obj); @@ -570,11 +569,11 @@ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); /* NOT NECESSARILY EMPTY: - list_of_read_objects - private_from_protected - public_to_private - - old_objects_to_trace */ assert(gcptrlist_size(&d->list_of_read_objects) == d->num_read_objects_known_old); @@ -608,23 +607,20 @@ } } - - /* we visit old_objects_to_trace during marking and thus, they - should be up-to-date */ -#ifdef _GC_DEBUG - items = d->old_objects_to_trace.items; - for (i = d->old_objects_to_trace.size - 1; i >= 0; i--) { - gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_MOVED)); - assert(obj->h_tid & GCFLAG_VISITED); - } -#endif + assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do * more here. 
*/ - if (d->active < 0) - return; /* already "aborted" during forced minor collection */ + if (d->active < 0) { + /* already "aborted" during forced minor collection + clear list of read objects so that a possible minor collection + before the abort doesn't trip + fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; + return; + } if (d->active == 2) { /* inevitable transaction: clear the list of read objects */ @@ -661,6 +657,9 @@ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR); + /* fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; return; } @@ -919,9 +918,13 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); mark_all_stack_roots(); - visit_all_objects(); + do { + visit_all_objects(); + stm_visit_old_weakrefs(); + } while (gcptrlist_size(&objects_to_trace) != 0); gcptrlist_delete(&objects_to_trace); clean_up_lists_of_read_objects_and_fix_outdated_flags(); + stm_clean_old_weakrefs(); mc_total_in_use = mc_total_reserved = 0; free_all_unused_local_pages(); diff --git a/c4/gcpage.h b/c4/gcpage.h --- a/c4/gcpage.h +++ b/c4/gcpage.h @@ -45,7 +45,8 @@ /* These fields are in tx_public_descriptor rather than tx_descriptor. The indirection allows us to keep around the lists of pages even - after the thread finishes, until the next major collection. + after the thread finishes. Such a "zombie" tx_public_descriptor + is reused by the next thread that starts. */ #define GCPAGE_FIELDS_DECL \ /* The array 'pages_for_size' contains GC_SMALL_REQUESTS \ @@ -65,7 +66,10 @@ /* A set of all non-small objects (outside the nursery). \ We could also have a single global set, but this avoids \ locking in stmgcpage_malloc/free. 
*/ \ - struct G2L nonsmall_objects; + struct G2L nonsmall_objects; \ + \ + /* Weakref support */ \ + struct GcPtrList old_weakrefs; #define LOCAL_GCPAGES() (thread_descriptor->public_descriptor) @@ -80,6 +84,7 @@ void stmgcpage_add_prebuilt_root(gcptr obj); void stmgcpage_possibly_major_collect(int force); long stmgcpage_count(int quantity); +gcptr stmgcpage_visit(gcptr); extern struct GcPtrList stm_prebuilt_gcroots; diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -1,7 +1,6 @@ #include "stmimpl.h" - -static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj) { return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); } @@ -54,6 +53,7 @@ gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); + gcptrlist_delete(&d->young_weakrefs); } void stmgc_minor_collect_soon(void) @@ -100,6 +100,13 @@ return P; } +gcptr stm_allocate_immutable(size_t size, unsigned long tid) +{ + gcptr P = stm_allocate(size, tid); + P->h_tid |= GCFLAG_IMMUTABLE; + return P; +} + gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); @@ -125,9 +132,6 @@ } /************************************************************/ -/* list for private/protected, old roots that need to be - kept in old_objects_to_trace */ -static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL}; static inline gcptr create_old_object_copy(gcptr obj) { @@ -150,7 +154,7 @@ gcptr fresh_old_copy; struct tx_descriptor *d = thread_descriptor; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* not a nursery object */ } else { @@ -207,22 +211,6 @@ (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ visit_if_young(end); - item = *end; - /* if private or protected, this object needs to be - traced again in the next minor_collect if it is - currently in old_objects_to_trace. 
Because then - it may be seen as write-ready in the view of - someone: - pw = write_barrier(); push_root(pw); - minor_collect(); pw = pop_root(); // pw still write-ready - */ - if (item - && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in - obj_to_trace*/ - && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED - || item->h_revision == stm_private_rev_num)) { - gcptrlist_insert(&private_or_protected_roots, item); - } } else if (item != NULL) { if (item == END_MARKER_OFF) @@ -377,29 +365,12 @@ stmgc_trace(obj, &visit_if_young); } - - while (gcptrlist_size(&private_or_protected_roots) > 0) { - gcptr obj = gcptrlist_pop(&private_or_protected_roots); - /* if it has the write_barrier flag, clear it so that - it doesn't get inserted twice by a later write-barrier */ - if (obj->h_tid & GCFLAG_WRITE_BARRIER) { - /* only insert those that were in old_obj_to_trace - and that we didn't insert already */ - obj->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&d->old_objects_to_trace, obj); - dprintf(("re-add %p to old_objects_to_trace\n", obj)); - } - } } static void fix_list_of_read_objects(struct tx_descriptor *d) { long i, limit = d->num_read_objects_known_old; gcptr *items = d->list_of_read_objects.items; - - if (d->active < 0) - return; // aborts anyway - assert(d->list_of_read_objects.size >= limit); if (d->active == 2) { @@ -410,7 +381,7 @@ for (i = d->list_of_read_objects.size - 1; i >= limit; --i) { gcptr obj = items[i]; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* non-young or visited young objects are kept */ continue; } @@ -442,8 +413,9 @@ static void teardown_minor_collect(struct tx_descriptor *d) { - //assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); spinlock_release(d->public_descriptor->collection_lock); @@ -479,6 +451,8 @@ surviving young-but-outside-the-nursery objects have been flagged with GCFLAG_OLD */ + stm_move_young_weakrefs(d); + teardown_minor_collect(d); assert(!stm_has_got_any_lock(d)); @@ -545,9 +519,9 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); - assert(IMPLIES(d->active > 0, - gcptrlist_size(&d->list_of_read_objects) >= - d->num_read_objects_known_old)); + assert(gcptrlist_size(&d->young_weakrefs) == 0); + assert(gcptrlist_size(&d->list_of_read_objects) >= + d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= d->num_private_from_protected_known_old); d->num_read_objects_known_old = diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -50,7 +50,10 @@ still in the same transaction, to know that the initial \ part of the lists cannot contain young objects any more. 
*/ \ long num_private_from_protected_known_old; \ - long num_read_objects_known_old; + long num_read_objects_known_old; \ + \ + /* Weakref support */ \ + struct GcPtrList young_weakrefs; struct tx_descriptor; /* from et.h */ @@ -64,5 +67,6 @@ size_t stmgc_size(gcptr); void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj); #endif diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -23,9 +23,56 @@ { gcptr stub, obj = *pobj; if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) == - (GCFLAG_PUBLIC | GCFLAG_OLD)) + (GCFLAG_PUBLIC | GCFLAG_OLD)) return; + if (obj->h_tid & GCFLAG_IMMUTABLE) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + } + /* we use 'all_stubs', a dictionary, in order to try to avoid duplicate stubs for the same object. XXX maybe it would be better to use a fast approximative cache that stays around for diff --git a/c4/stmgc.c b/c4/stmgc.c --- a/c4/stmgc.c +++ b/c4/stmgc.c @@ -10,5 +10,6 @@ #include "gcpage.c" #include "stmsync.c" #include "extra.c" +#include "weakref.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -29,6 +29,9 @@ /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); +/* allocate an object that is be immutable. it cannot be changed with + a stm_write_barrier() or after the next commit */ +gcptr stm_allocate_immutable(size_t size, unsigned long tid); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); @@ -54,11 +57,19 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now) */ -#if 0 // (optimized version below) -gcptr stm_read_barrier(gcptr); -gcptr stm_write_barrier(gcptr); -#endif +/* read/write barriers (the most general versions only for now). + + - the read barrier must be applied before reading from an object. + the result is valid as long as we're in the same transaction, + and stm_write_barrier() is not called on the same object. + + - the write barrier must be applied before writing to an object. 
+ the result is valid for a shorter period of time: we have to + do stm_write_barrier() again if we ended the transaction, or + if we did a potential collection (e.g. stm_allocate()). +*/ +static inline gcptr stm_read_barrier(gcptr); +static inline gcptr stm_write_barrier(gcptr); /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' @@ -114,6 +125,14 @@ void stm_minor_collect(void); void stm_major_collect(void); +/* weakref support: allocate a weakref object, and set it to point + weakly to 'obj'. The weak pointer offset is hard-coded to be at + 'size - WORD'. Important: stmcb_trace() must NOT trace it. + Weakrefs are *immutable*! Don't attempt to use stm_write_barrier() + on them. */ +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); + + /**************** END OF PUBLIC INTERFACE *****************/ /************************************************************/ diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -36,5 +36,6 @@ #include "steal.h" #include "stmsync.h" #include "extra.h" +#include "weakref.h" #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -11,11 +11,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in "et.h lists.h steal.h nursery.h gcpage.h " - "stmsync.h extra.h dbgmem.h fprintcolor.h " + "stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in "et.c lists.c steal.c nursery.c gcpage.c " - "stmsync.c extra.c dbgmem.c fprintcolor.c".split()] + "stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -46,7 +46,7 @@ #define PREBUILT_FLAGS ... #define PREBUILT_REVISION ... - gcptr stm_allocate(size_t size, unsigned int tid); + gcptr stm_allocate(size_t size, unsigned long tid); revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); @@ -69,6 +69,7 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); void stm_abort_and_retry(void); + gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); /* extra non-public code */ void printfcolor(char *msg); @@ -133,6 +134,7 @@ #define GCFLAG_STUB ... #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define GCFLAG_HAS_ID ... + #define GCFLAG_IMMUTABLE ... #define ABRT_MANUAL ... 
typedef struct { ...; } page_header_t; ''') @@ -164,14 +166,18 @@ gcptr rawgetptr(gcptr obj, long index) { - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); return ((gcptr *)(obj + 1))[index]; } void rawsetptr(gcptr obj, long index, gcptr newvalue) { fprintf(stderr, "%p->[%ld] = %p\n", obj, index, newvalue); - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); ((gcptr *)(obj + 1))[index] = newvalue; } @@ -282,6 +288,8 @@ else { int nrefs = gettid(obj) - 42142; assert(nrefs < 100); + if (nrefs == 0) /* weakrefs */ + nrefs = 1; return sizeof(*obj) + nrefs * sizeof(gcptr); } } @@ -484,7 +492,7 @@ def oalloc_refs(nrefs): """Allocate an 'old' protected object, outside any nursery, with nrefs pointers""" - size = HDR + WORD * nrefs + size = HDR + WORD * (nrefs or 1) p = lib.stmgcpage_malloc(size) lib.memset(p, 0, size) p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER @@ -506,9 +514,9 @@ def nalloc_refs(nrefs): "Allocate a fresh object from the nursery, with nrefs pointers" - p = lib.stm_allocate(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.stm_allocate(HDR + WORD * (nrefs or 1), 42142 + nrefs) assert p.h_revision == lib.get_private_rev_num() - for i in range(nrefs): + for i in range(nrefs or 1): assert rawgetptr(p, i) == ffi.NULL # must already be zero-filled return p @@ -524,9 +532,9 @@ def palloc_refs(nrefs, prehash=None): "Get a ``prebuilt'' object with nrefs pointers." if prehash is None: - p = lib.pseudoprebuilt(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.pseudoprebuilt(HDR + WORD * (nrefs or 1), 42142 + nrefs) else: - p = lib.pseudoprebuilt_with_hash(HDR + WORD * nrefs, + p = lib.pseudoprebuilt_with_hash(HDR + WORD * (nrefs or 1), 42142 + nrefs, prehash) return p @@ -686,5 +694,8 @@ should_break_transaction = lib.stm_should_break_transaction - +WEAKREF_SIZE = HDR + WORD +WEAKREF_TID = 42142 + + nrb_protected = ffi.cast("gcptr", -1) diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -205,6 +205,8 @@ assert list_of_read_objects() == [p2] def test_write_barrier_after_minor_collect(): + # should fail + py.test.skip("should fail now") p = oalloc_refs(1) pw = lib.stm_write_barrier(p) @@ -220,8 +222,10 @@ assert pw.h_tid & GCFLAG_OLD rawsetptr(pw, 0, r) - # pw needs to be readded to old_objects_to_trace - # before the next minor gc in order for this test to pass + # pw not in old_objects_to_trace. A + # repeated write_barrier before + # rawsetptr() would fix that + lib.stm_push_root(r) minor_collect() minor_collect() @@ -232,6 +236,10 @@ pr = lib.stm_read_barrier(p) assert r != r2 + # these will fail because pw/pr was + # not traced in the last minor_collect, + # because they were not registered in + # old_objects_to_trace. 
assert getptr(pr, 0) != r assert getptr(pr, 0) == r2 @@ -251,6 +259,7 @@ assert getptr(pr, 0) != q2 def test_write_barrier_after_minor_collect_young_to_old(): + py.test.skip("should fail now") p = nalloc_refs(1) pw = lib.stm_write_barrier(p) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -200,6 +200,84 @@ check_not_free(p2) assert classify(p2) == "private" +def test_old_private_from_protected_to_young_private_2(): + py.test.skip("not valid") + p0 = nalloc_refs(1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + lib.setptr(p0, 0, ffi.NULL) + assert classify(p0) == "private_from_protected" + assert lib.in_nursery(p0) # a young private_from_protected + # + lib.stm_push_root(p0) + minor_collect() + p0 = lib.stm_pop_root() + assert classify(p0) == "private_from_protected" + assert not lib.in_nursery(p0) # becomes an old private_from_protected + # + # Because it's a private_from_protected, its h_revision is a pointer + # to the backup copy, and not stm_private_rev_num. It means that the + # write barrier will always enter its slow path, even though the + # GCFLAG_WRITE_BARRIER is not set. + assert p0.h_revision != lib.get_private_rev_num() + assert not (p0.h_tid & GCFLAG_WRITE_BARRIER) + # + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) # should trigger the write barrier again + assert classify(p0) == "private_from_protected" + lib.stm_push_root(p0) + minor_collect() + p0b = lib.stm_pop_root() + assert p0b == p0 + check_nursery_free(p1) + assert classify(p0) == "private_from_protected" + p2 = lib.getptr(p0, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + +def test_old_private_from_protected_to_young_private_3(): + p0 = palloc_refs(1) + pw = lib.stm_write_barrier(p0) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "protected" + assert lib.in_nursery(pr) # a young protected + # + minor_collect() + # each minor collect adds WRITE_BARRIER to protected/private + # objects it moves out of the nursery + pr = lib.stm_read_barrier(p0) + assert pr.h_tid & GCFLAG_WRITE_BARRIER + pw = lib.stm_write_barrier(pr) + # added to old_obj_to_trace + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + + lib.setptr(pw, 0, ffi.NULL) + assert classify(pw) == "private_from_protected" + assert not lib.in_nursery(pw) + + assert pw.h_revision != lib.get_private_rev_num() + assert not (pw.h_tid & GCFLAG_WRITE_BARRIER) + # # + + lib.stm_push_root(pw) + minor_collect() + p1 = nalloc(HDR) + pw = lib.stm_pop_root() + assert pw.h_tid & GCFLAG_WRITE_BARRIER + lib.setptr(pw, 0, p1) # should trigger the write barrier again + assert classify(pr) == "private_from_protected" + minor_collect() + check_nursery_free(p1) + pr = lib.stm_read_barrier(p0) + assert classify(pr) == "private_from_protected" + p2 = lib.getptr(pr, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" + def test_new_version(): p1 = oalloc(HDR) assert lib.stm_write_barrier(p1) == p1 diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py new file mode 100644 --- /dev/null +++ b/c4/test/test_weakref.py @@ -0,0 +1,120 @@ +import py +from support import * + + +class BaseTest(object): + def setup_method(self, meth): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(0) + def teardown_method(self, meth): + lib.stm_finalize() + + +class TestMinorCollection(BaseTest): + + def 
test_weakref_invalidate(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + minor_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == ffi.NULL + + def test_weakref_itself_dies(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + minor_collect() + + def test_weakref_keep(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + def test_weakref_old_keep(self): + p2 = oalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + assert p1.h_tid == WEAKREF_TID | GCFLAG_IMMUTABLE + assert p1.h_revision == lib.get_private_rev_num() + assert lib.rawgetptr(p1, 0) == p2 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + +class TestMajorCollection(BaseTest): + + def test_weakref_old(self): + p2 = nalloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + # + lib.stm_push_root(p1) + major_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == ffi.NULL + + def test_weakref_to_prebuilt(self): + p2 = palloc(HDR) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + major_collect() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + + def test_weakref_update_version(self): + p2 = oalloc(HDR + WORD); make_public(p2) + p1 = lib.stm_weakref_allocate(WEAKREF_SIZE, WEAKREF_TID, p2) + # + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + # + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + # + lib.setlong(p2, 0, 912809218) # write barrier + assert lib.rawgetlong(p2, 0) == 0 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2 + assert lib.rawgetlong(p2, 0) == 0 + # + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + # + assert lib.rawgetlong(p2, 0) == 0 + lib.stm_push_root(p1) + lib.stm_push_root(p2) + major_collect() + p2b = lib.stm_pop_root() + p1 = lib.stm_pop_root() + assert lib.rawgetptr(p1, 0) == p2b + assert p2b != p2 + assert lib.getlong(p2b, 0) == 912809218 diff --git a/c4/weakref.c b/c4/weakref.c new file mode 100644 --- /dev/null +++ b/c4/weakref.c @@ -0,0 +1,210 @@ +#include "stmimpl.h" + +#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD)) + + +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj) +{ + stm_push_root(obj); + gcptr weakref = stm_allocate_immutable(size, tid); + obj = stm_pop_root(); + assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? 
*/ + assert(stmgc_size(weakref) == size); + WEAKREF_PTR(weakref, size) = obj; + gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref); + dprintf(("alloc weakref %p -> %p\n", weakref, obj)); + return weakref; +} + + +/***** Minor collection *****/ + +void stm_move_young_weakrefs(struct tx_descriptor *d) +{ + /* The code relies on the fact that no weakref can be an old object + weakly pointing to a young object. Indeed, weakrefs are immutable + so they cannot point to an object that was created after it. + */ + while (gcptrlist_size(&d->young_weakrefs) > 0) { + gcptr weakref = gcptrlist_pop(&d->young_weakrefs); + if (!(weakref->h_tid & GCFLAG_MOVED)) + continue; /* the weakref itself dies */ + + weakref = (gcptr)weakref->h_revision; + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + + if (stmgc_is_in_nursery(d, pointing_to)) { + if (pointing_to->h_tid & GCFLAG_MOVED) { + dprintf(("weakref ptr moved %p->%p\n", + WEAKREF_PTR(weakref, size), + (gcptr)pointing_to->h_revision)); + WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision; + } + else { + dprintf(("weakref lost ptr %p\n", WEAKREF_PTR(weakref, size))); + WEAKREF_PTR(weakref, size) = NULL; + continue; /* no need to remember this weakref any longer */ + } + } + else { + /* # see test_weakref_to_prebuilt: it's not useful to put + # weakrefs into 'old_objects_with_weakrefs' if they point + # to a prebuilt object (they are immortal). If moreover + # the 'pointing_to' prebuilt object still has the + # GCFLAG_NO_HEAP_PTRS flag, then it's even wrong, because + # 'pointing_to' will not get the GCFLAG_VISITED during + # the next major collection. Solve this by not registering + # the weakref into 'old_objects_with_weakrefs'. + */ + } + gcptrlist_insert(&d->public_descriptor->old_weakrefs, weakref); + } +} + + +/***** Major collection *****/ + +static _Bool is_partially_visited(gcptr obj) +{ + /* Based on gcpage.c:visit(). Check the code here if we simplify + visit(). Returns True or False depending on whether we find any + version of 'obj' to be VISITED or not. + */ + restart: + if (obj->h_tid & GCFLAG_VISITED) + return 1; + + if (obj->h_revision & 1) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(obj->h_tid & GCFLAG_STUB)); + return 0; + } + else if (obj->h_tid & GCFLAG_PUBLIC) { + /* h_revision is a ptr: we have a more recent version */ + if (!(obj->h_revision & 2)) { + /* go visit the more recent version */ + obj = (gcptr)obj->h_revision; + } + else { + /* it's a stub */ + assert(obj->h_tid & GCFLAG_STUB); + obj = (gcptr)(obj->h_revision - 2); + } + goto restart; + } + else { + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + gcptr B = (gcptr)obj->h_revision; + assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); + if (B->h_tid & GCFLAG_VISITED) + return 1; + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(B->h_tid & GCFLAG_STUB)); + + if (IS_POINTER(B->h_revision)) { + assert(B->h_tid & GCFLAG_PUBLIC); + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(B->h_revision & 2)); + + obj = (gcptr)B->h_revision; + goto restart; + } + } + return 0; +} + +static void visit_old_weakrefs(struct tx_public_descriptor *gcp) +{ + /* Note: it's possible that a weakref points to a public stub to a + protected object, and only the protected object was marked as + VISITED so far. In this case, this function needs to mark the + public stub as VISITED too. 
+ */ + long i, size = gcp->old_weakrefs.size; + gcptr *items = gcp->old_weakrefs.items; + + for (i = 0; i < size; i++) { + gcptr weakref = items[i]; + + /* weakrefs are immutable: during a major collection, they + cannot be in the nursery, and so there should be only one + version of each weakref object. XXX relying on this is + a bit fragile, but simplifies things a lot... */ + assert(weakref->h_revision & 1); + + if (!(weakref->h_tid & GCFLAG_VISITED)) { + /* the weakref itself dies */ + } + else { + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + assert(pointing_to != NULL); + if (is_partially_visited(pointing_to)) { + pointing_to = stmgcpage_visit(pointing_to); + dprintf(("mweakref ptr moved %p->%p\n", + WEAKREF_PTR(weakref, size), + pointing_to)); + + assert(pointing_to->h_tid & GCFLAG_VISITED); + WEAKREF_PTR(weakref, size) = pointing_to; + } + else { + /* the weakref appears to be pointing to a dying object, + but we don't know for sure now. Clearing it is left + to clean_old_weakrefs(). */ + } + } + } +} + +static void clean_old_weakrefs(struct tx_public_descriptor *gcp) +{ + long i, size = gcp->old_weakrefs.size; + gcptr *items = gcp->old_weakrefs.items; + + for (i = size - 1; i >= 0; i--) { + gcptr weakref = items[i]; + assert(weakref->h_revision & 1); + if (weakref->h_tid & GCFLAG_VISITED) { + size_t size = stmgc_size(weakref); + gcptr pointing_to = WEAKREF_PTR(weakref, size); + if (pointing_to->h_tid & GCFLAG_VISITED) { + continue; /* the target stays alive, the weakref remains */ + } + dprintf(("mweakref lost ptr %p\n", WEAKREF_PTR(weakref, size))); + WEAKREF_PTR(weakref, size) = NULL; /* the target dies */ + } + /* remove this weakref from the list */ + items[i] = items[--gcp->old_weakrefs.size]; + } + gcptrlist_compress(&gcp->old_weakrefs); +} + +static void for_each_public_descriptor( + void visit(struct tx_public_descriptor *)) { + struct tx_descriptor *d; + for (d = stm_tx_head; d; d = d->tx_next) + visit(d->public_descriptor); + + struct tx_public_descriptor *gcp; + revision_t index = -1; + while ((gcp = stm_get_free_public_descriptor(&index)) != NULL) + visit(gcp); +} + +void stm_visit_old_weakrefs(void) +{ + /* Figure out which weakrefs survive, which possibly + adds more objects to 'objects_to_trace'. 
+ */ + for_each_public_descriptor(visit_old_weakrefs); +} + +void stm_clean_old_weakrefs(void) +{ + /* Clean up the non-surviving weakrefs + */ + for_each_public_descriptor(clean_old_weakrefs); +} diff --git a/c4/weakref.h b/c4/weakref.h new file mode 100644 --- /dev/null +++ b/c4/weakref.h @@ -0,0 +1,10 @@ +#ifndef _SRCSTM_WEAKREF_H +#define _SRCSTM_WEAKREF_H + + +void stm_move_young_weakrefs(struct tx_descriptor *); +void stm_visit_old_weakrefs(void); +void stm_clean_old_weakrefs(void); + + +#endif diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -75,7 +75,7 @@ void _list_append(DuListObject *ob, DuObject *x) { - _du_write1(ob); + _du_read1(ob); DuTupleObject *olditems = ob->ob_tuple; _du_read1(olditems); @@ -85,6 +85,8 @@ DuTupleObject *newitems = DuTuple_New(newcount); _du_restore3(ob, x, olditems); + _du_write1(ob); + + for (i=0; i<newcount-1; i++) newitems->ob_items[i] = olditems->ob_items[i]; newitems->ob_items[newcount-1] = x; From noreply at buildbot.pypy.org Fri Jul 19 16:44:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 19 Jul 2013 16:44:16 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: try to improve on slowpath + fixes Message-ID: <20130719144416.7B0751C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65481:23817367279e Date: 2013-07-19 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/23817367279e/ Log: try to improve on slowpath + fixes diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -106,8 +106,10 @@ kind='unicode') else: self.malloc_slowpath_unicode = None - self.cond_call_slowpath = [self._build_cond_call_slowpath(False), - self._build_cond_call_slowpath(True)] + self.cond_call_slowpath = [self._build_cond_call_slowpath(False, False), + self._build_cond_call_slowpath(False, True), + self._build_cond_call_slowpath(True, False), + self._build_cond_call_slowpath(True, True)] self._build_stack_check_slowpath() self._build_release_gil(gc_ll_descr.gcrootmap) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -382,7 +382,7 @@ loc = self.reg_bindings.get(v, None) if loc is not None and loc not in self.no_lower_byte_regs: return loc - for i in range(len(self.free_regs)): + for i in range(len(self.free_regs) - 1, -1, -1): reg = self.free_regs[i] if reg not in self.no_lower_byte_regs: if loc is not None: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -141,7 +141,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._load_shadowstack_top_in_ebx(mc, gcrootmap) + self._load_shadowstack_top_in_reg(mc, gcrootmap) mc.MOV_mr((ebx.value, -WORD), eax.value) mc.MOV_bi(gcmap_ofs, 0) @@ -149,15 +149,15 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) - def _build_cond_call_slowpath(self, supports_floats): + def _build_cond_call_slowpath(self, supports_floats, callee_only): """ This builds a general call slowpath, for whatever call happens to come. 
""" mc = codebuf.MachineCodeBlockWrapper() - self._push_all_regs_to_frame(mc, [], supports_floats, callee_only=False) + self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_header_shadowstack(mc, gcrootmap) + self._call_header_shadowstack(mc, gcrootmap, selected_reg=r8) mc.SUB(esp, imm(WORD)) self.set_extra_stack_depth(mc, 2 * WORD) # args are in their respective positions @@ -166,9 +166,9 @@ self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(mc, gcrootmap) - self._pop_all_regs_from_frame(mc, [], self.cpu.supports_floats, - callee_only=False) + self._call_footer_shadowstack(mc, gcrootmap, selected_reg=r8) + self._pop_all_regs_from_frame(mc, [], supports_floats, + callee_only) mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) @@ -757,34 +757,34 @@ self.mc.ADD_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.RET() - def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + def _load_shadowstack_top_in_reg(self, mc, gcrootmap, selected_reg=ebx): rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] + mc.MOV_rj(selected_reg.value, rst) # MOV ebx, [rootstacktop] else: mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop - mc.MOV_rm(ebx.value, (X86_64_SCRATCH_REG.value, 0)) + mc.MOV_rm(selected_reg.value, (X86_64_SCRATCH_REG.value, 0)) # MOV ebx, [r11] # return rst - def _call_header_shadowstack(self, mc, gcrootmap): - rst = self._load_shadowstack_top_in_ebx(mc, gcrootmap) - mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp - mc.ADD_ri(ebx.value, WORD) + def _call_header_shadowstack(self, mc, gcrootmap, selected_reg=ebx): + rst = self._load_shadowstack_top_in_reg(mc, gcrootmap, selected_reg) + mc.MOV_mr((selected_reg.value, 0), ebp.value) # MOV [ebx], ebp + mc.ADD_ri(selected_reg.value, WORD) if rx86.fits_in_32bits(rst): - mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx + mc.MOV_jr(rst, selected_reg.value) # MOV [rootstacktop], ebx else: mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), - ebx.value) # MOV [r11], ebx + selected_reg.value) # MOV [r11], ebx - def _call_footer_shadowstack(self, mc, gcrootmap): + def _call_footer_shadowstack(self, mc, gcrootmap, selected_reg=ebx): rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: - mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop - mc.SUB_mi8((ebx.value, 0), WORD) # SUB [ebx], WORD + mc.MOV_ri(selected_reg.value, rst) # MOV ebx, rootstacktop + mc.SUB_mi8((selected_reg.value, 0), WORD) # SUB [ebx], WORD def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking @@ -2152,10 +2152,17 @@ self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() self.push_gcmap(self.mc, gcmap, store=True) - if self._regalloc is not None and self._regalloc.xrm.reg_bindings: - cond_call_adr = self.cond_call_slowpath[1] - else: - cond_call_adr = self.cond_call_slowpath[0] + callee_only = False + floats = False + if self._regalloc is not None: + for reg in self._regalloc.rm.reg_bindings.values(): + if reg not in self._regalloc.rm.save_around_call_regs: + break + else: + callee_only = True + if self._regalloc.xrm.reg_bindings: + floats = True + cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] 
self.mc.CALL(imm(cond_call_adr)) self.pop_gcmap(self.mc) # never any result value From noreply at buildbot.pypy.org Fri Jul 19 20:31:07 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 20:31:07 +0200 (CEST) Subject: [pypy-commit] pypy default: More tests, checked to work on CPython's greenlet Message-ID: <20130719183107.B8C1A1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65482:694068eb90e4 Date: 2013-07-19 20:30 +0200 http://bitbucket.org/pypy/pypy/changeset/694068eb90e4/ Log: More tests, checked to work on CPython's greenlet diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -341,3 +341,22 @@ assert main.switch(3, x=5) == ((3,), {'x': 5}) assert main.switch(3, x=5, y=6) == ((3,), {'x': 5, 'y': 6}) assert main.switch(2, 3, x=6) == ((2, 3), {'x': 6}) + + def test_throw_GreenletExit_not_started(self): + import greenlet + def f(): + never_executed + g = greenlet.greenlet(f) + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e + + def test_throw_GreenletExit_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e From noreply at buildbot.pypy.org Fri Jul 19 20:45:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 20:45:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the first of the two tests Message-ID: <20130719184543.AB93B1C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65483:e9608727658c Date: 2013-07-19 20:33 +0200 http://bitbucket.org/pypy/pypy/changeset/e9608727658c/ Log: Fix the first of the two tests diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -147,5 +147,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) From noreply at buildbot.pypy.org Fri Jul 19 20:45:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 20:45:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the second test. Message-ID: <20130719184544.E1C501C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65484:32a376d76708 Date: 2013-07-19 20:44 +0200 http://bitbucket.org/pypy/pypy/changeset/32a376d76708/ Log: Fix the second test. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -57,6 +57,7 @@ def __switch(target, methodname, *baseargs): current = getcurrent() + convert_greenletexit = True # while not (target.__main or _continulet.is_pending(target)): # inlined __nonzero__ ^^^ in case it's overridden @@ -75,6 +76,16 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw' and convert_greenletexit: + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + pass + convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) From noreply at buildbot.pypy.org Fri Jul 19 20:52:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 20:52:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Test for the "except:" path. Tests that the exception class is only Message-ID: <20130719185253.2CED01C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65485:d2ffe8aaa387 Date: 2013-07-19 20:51 +0200 http://bitbucket.org/pypy/pypy/changeset/d2ffe8aaa387/ Log: Test for the "except:" path. Tests that the exception class is only instantiated once, and fix. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -84,8 +85,8 @@ methodname = 'switch' baseargs = (((e,), {}),) except: - pass - convert_greenletexit = False + baseargs = sys.exc_info()[:2] + baseargs[2:] + convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -360,3 +360,21 @@ e = greenlet.GreenletExit() x = g.throw(e) assert x is e + + def test_throw_exception_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + seen = [] + class MyException(Exception): + def __init__(self): + seen.append(1) + try: + g.throw(MyException) + except MyException: + pass + else: + raise AssertionError("no exception??") + assert seen == [1] From noreply at buildbot.pypy.org Fri Jul 19 20:54:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 19 Jul 2013 20:54:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill this now-not-useful variable Message-ID: <20130719185424.221011C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65486:0c1ca879c6b0 Date: 2013-07-19 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/0c1ca879c6b0/ Log: Kill this now-not-useful variable diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -58,7 +58,6 @@ def __switch(target, methodname, *baseargs): current = getcurrent() - convert_greenletexit = True # while not (target.__main or _continulet.is_pending(target)): # inlined __nonzero__ ^^^ in case it's overridden @@ -78,7 +77,7 @@ # will show that the program is caught in this loop here.) 
target = target.parent # convert a "raise GreenletExit" into "return GreenletExit" - if methodname == 'throw' and convert_greenletexit: + if methodname == 'throw': try: raise baseargs[0], baseargs[1] except GreenletExit, e: @@ -86,7 +85,6 @@ baseargs = (((e,), {}),) except: baseargs = sys.exc_info()[:2] + baseargs[2:] - convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) From noreply at buildbot.pypy.org Sat Jul 20 00:24:11 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 20 Jul 2013 00:24:11 +0200 (CEST) Subject: [pypy-commit] pypy default: added statvfs_result to posix Message-ID: <20130719222411.063EA1C02BA@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65487:b95b5d213403 Date: 2013-07-19 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/b95b5d213403/ Log: added statvfs_result to posix diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -34,6 +34,7 @@ appleveldefs = { 'error' : 'app_posix.error', 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', 'fdopen' : 'app_posix.fdopen', 'tmpfile' : 'app_posix.tmpfile', 'popen' : 'app_posix.popen', diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -65,6 +65,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. So we call fstat(), letting From noreply at buildbot.pypy.org Sat Jul 20 01:10:51 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 01:10:51 +0200 (CEST) Subject: [pypy-commit] pypy default: fill in is_none Message-ID: <20130719231051.C84E91C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65488:bd08271bef4a Date: 2013-07-19 16:00 -0700 http://bitbucket.org/pypy/pypy/changeset/bd08271bef4a/ Log: fill in is_none diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py --- a/pypy/tool/pytest/objspace.py +++ b/pypy/tool/pytest/objspace.py @@ -94,6 +94,9 @@ def is_true(self, obj): return bool(obj) + def is_none(self, obj): + return obj is None + def str_w(self, w_str): return w_str From noreply at buildbot.pypy.org Sat Jul 20 01:10:53 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 01:10:53 +0200 (CEST) Subject: [pypy-commit] pypy default: py3k branch compat. Message-ID: <20130719231053.032271C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65489:40a0a35f612c Date: 2013-07-19 16:02 -0700 http://bitbucket.org/pypy/pypy/changeset/40a0a35f612c/ Log: py3k branch compat. 
diff --git a/pypy/module/test_lib_pypy/test_tputil.py b/pypy/module/test_lib_pypy/test_tputil.py --- a/pypy/module/test_lib_pypy/test_tputil.py +++ b/pypy/module/test_lib_pypy/test_tputil.py @@ -39,7 +39,7 @@ l = [] def f(*args): - print args + print(args) tp = make_proxy(f, type=A) #tp.__getslice__(0, 1) From noreply at buildbot.pypy.org Sat Jul 20 01:10:54 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 01:10:54 +0200 (CEST) Subject: [pypy-commit] pypy default: convert to formal app level tests so they have a chance of running on the py3k Message-ID: <20130719231054.2CE691C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65490:2997e5b63e2e Date: 2013-07-19 16:08 -0700 http://bitbucket.org/pypy/pypy/changeset/2997e5b63e2e/ Log: convert to formal app level tests so they have a chance of running on the py3k branch. they'll run slower but that can be avoided w/ appdirect mode diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/support.py @@ -0,0 +1,34 @@ +import py + +from pypy.conftest import option +from pypy.interpreter.error import OperationError + +def import_lib_pypy(space, name, skipmsg=None): + """Import a top level module ensuring it's sourced from the lib_pypy + package. + + Raises a pytest Skip on ImportError if a skip message was specified. + """ + if option.runappdirect: + try: + mod = __import__('lib_pypy.' + name) + except ImportError as e: + if skipmsg is not None: + py.test.skip('%s (%s))' % (skipmsg, str(e))) + raise + return getattr(mod, name) + + try: + # Assume app-level import finds it from the right place (we + # assert so afterwards). It should as long as a builtin module + # overshadows it + w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) + except OperationError as e: + if skipmsg is not None or not e.match(space, space.w_ImportError): + raise + py.test.skip('%s (%s))' % (skipmsg, str(e))) + w_file = space.getattr(w_mod, space.wrap('__file__')) + assert space.is_true(space.contains(w_file, space.wrap('lib_pypy'))), \ + ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" 
% name) + return w_mod diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -1,26 +1,32 @@ -from __future__ import absolute_import -import py -try: - from lib_pypy import grp -except ImportError: - py.test.skip("No grp module on this platform") +from pypy.module.test_lib_pypy.support import import_lib_pypy -def test_basic(): - g = grp.getgrnam("root") - assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] - assert g.gr_name == 'root' - assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) -def test_extra(): - py.test.raises(TypeError, grp.getgrnam, False) - py.test.raises(TypeError, grp.getgrnam, None) +class AppTestGrp: -def test_struct_group(): - g = grp.struct_group((10, 20, 30, 40)) - assert len(g) == 4 - assert list(g) == [10, 20, 30, 40] - assert g.gr_name == 10 - assert g.gr_passwd == 20 - assert g.gr_gid == 30 - assert g.gr_mem == 40 + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + cls.w_grp = import_lib_pypy(cls.space, 'grp', + "No grp module on this platform") + + def test_basic(self): + g = self.grp.getgrnam("root") + assert g.gr_gid == 0 + assert g.gr_mem == ['root'] or g.gr_mem == [] + assert g.gr_name == 'root' + assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + + def test_extra(self): + grp = self.grp + print(grp.__file__) + raises(TypeError, grp.getgrnam, False) + raises(TypeError, grp.getgrnam, None) + + def test_struct_group(self): + g = self.grp.struct_group((10, 20, 30, 40)) + assert len(g) == 4 + assert list(g) == [10, 20, 30, 40] + assert g.gr_name == 10 + assert g.gr_passwd == 20 + assert g.gr_gid == 30 + assert g.gr_mem == 40 diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,18 +1,29 @@ # Generates the resource cache (it might be there already, but maybe not) from __future__ import absolute_import -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('resource.ctc.py') - import os -if hasattr(os, 'wait3'): - from lib_pypy._pypy_wait import wait3 - def test_os_wait3(): +import py + +from lib_pypy.ctypes_config_cache import rebuild +from pypy.module.test_lib_pypy.support import import_lib_pypy + + +class AppTestOsWait: + + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + if not hasattr(os, "fork"): + py.test.skip("Need fork() to test wait3/wait4()") + rebuild.rebuild_one('resource.ctc.py') + cls.w__pypy_wait = import_lib_pypy( + cls.space, '_pypy_wait', + '_pypy_wait not supported on this platform') + + def test_os_wait3(self): + import os + wait3 = self._pypy_wait.wait3 exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait3()") - child = os.fork() if child == 0: # in child os._exit(exit_status) @@ -24,14 +35,10 @@ assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) -if hasattr(os, 'wait4'): - from lib_pypy._pypy_wait import wait4 - def test_os_wait4(): + def test_os_wait4(self): + import os + wait4 = self._pypy_wait.wait4 exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait4()") - child = os.fork() if child == 0: # in child os._exit(exit_status) diff --git a/pypy/module/test_lib_pypy/test_resource.py 
b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -1,36 +1,41 @@ from __future__ import absolute_import -import py -try: - from lib_pypy import resource -except ImportError: - py.test.skip('no resource module available') from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('resource.ctc.py') +from pypy.module.test_lib_pypy.support import import_lib_pypy -def test_resource(): - x = resource.getrusage(resource.RUSAGE_SELF) - assert len(x) == 16 - assert x[0] == x[-16] == x.ru_utime - assert x[1] == x[-15] == x.ru_stime - assert x[2] == x[-14] == x.ru_maxrss - assert x[3] == x[-13] == x.ru_ixrss - assert x[4] == x[-12] == x.ru_idrss - assert x[5] == x[-11] == x.ru_isrss - assert x[6] == x[-10] == x.ru_minflt - assert x[7] == x[-9] == x.ru_majflt - assert x[8] == x[-8] == x.ru_nswap - assert x[9] == x[-7] == x.ru_inblock - assert x[10] == x[-6] == x.ru_oublock - assert x[11] == x[-5] == x.ru_msgsnd - assert x[12] == x[-4] == x.ru_msgrcv - assert x[13] == x[-3] == x.ru_nsignals - assert x[14] == x[-2] == x.ru_nvcsw - assert x[15] == x[-1] == x.ru_nivcsw - for i in range(16): - if i < 2: - expected_type = float - else: - expected_type = (int, long) - assert isinstance(x[i], expected_type) +class AppTestResource: + + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + rebuild.rebuild_one('resource.ctc.py') + cls.w_resource = import_lib_pypy(cls.space, 'resource', + 'No resource module available') + + def test_resource(self): + resource = self.resource + x = resource.getrusage(resource.RUSAGE_SELF) + assert len(x) == 16 + assert x[0] == x[-16] == x.ru_utime + assert x[1] == x[-15] == x.ru_stime + assert x[2] == x[-14] == x.ru_maxrss + assert x[3] == x[-13] == x.ru_ixrss + assert x[4] == x[-12] == x.ru_idrss + assert x[5] == x[-11] == x.ru_isrss + assert x[6] == x[-10] == x.ru_minflt + assert x[7] == x[-9] == x.ru_majflt + assert x[8] == x[-8] == x.ru_nswap + assert x[9] == x[-7] == x.ru_inblock + assert x[10] == x[-6] == x.ru_oublock + assert x[11] == x[-5] == x.ru_msgsnd + assert x[12] == x[-4] == x.ru_msgrcv + assert x[13] == x[-3] == x.ru_nsignals + assert x[14] == x[-2] == x.ru_nvcsw + assert x[15] == x[-1] == x.ru_nivcsw + for i in range(16): + if i < 2: + expected_type = float + else: + expected_type = (int, long) + assert isinstance(x[i], expected_type) diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -1,14 +1,21 @@ -# Testing sha module (NIST's Secure Hash Algorithm) +"""Testing sha module (NIST's Secure Hash Algorithm) -# use the three examples from Federal Information Processing Standards -# Publication 180-1, Secure Hash Standard, 1995 April 17 -# http://www.itl.nist.gov/div897/pubs/fip180-1.htm -from __future__ import absolute_import -from lib_pypy import _sha as pysha +use the three examples from Federal Information Processing Standards +Publication 180-1, Secure Hash Standard, 1995 April 17 +http://www.itl.nist.gov/div897/pubs/fip180-1.htm +""" +from pypy.module.test_lib_pypy.support import import_lib_pypy -class TestSHA: - def check(self, data, digest): - computed = pysha.new(data).hexdigest() + +class AppTestSHA: + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha = import_lib_pypy(cls.space, '_sha') + + def w_check(self, data, digest): + 
computed = self._sha.new(data).hexdigest() assert computed == digest def test_case_1(self): @@ -23,10 +30,10 @@ self.check("a" * 1000000, "34aa973cd4c4daa4f61eeb2bdbad27316534016f") - -def test_attributes(): - assert pysha.digest_size == 20 - assert pysha.digestsize == 20 - assert pysha.blocksize == 1 - assert pysha.new().digest_size == 20 - assert pysha.new().digestsize == 20 + def test_attributes(self): + _sha = self._sha + assert _sha.digest_size == 20 + assert _sha.digestsize == 20 + assert _sha.blocksize == 1 + assert _sha.new().digest_size == 20 + assert _sha.new().digestsize == 20 diff --git a/pypy/module/test_lib_pypy/test_structseq.py b/pypy/module/test_lib_pypy/test_structseq.py --- a/pypy/module/test_lib_pypy/test_structseq.py +++ b/pypy/module/test_lib_pypy/test_structseq.py @@ -1,80 +1,106 @@ -from __future__ import absolute_import -import py -from lib_pypy._structseq import structseqfield, structseqtype +from pypy.module.test_lib_pypy.support import import_lib_pypy -class mydata: - __metaclass__ = structseqtype +class AppTestStructseq: - st_mode = structseqfield(0, "protection bits") - st_ino = structseqfield(1) - st_dev = structseqfield(2) - st_nlink = structseqfield(3) - st_uid = structseqfield(4) - st_gid = structseqfield(5) - st_size = structseqfield(6) - _st_atime_as_int = structseqfield(7) - _st_mtime_as_int = structseqfield(8) - _st_ctime_as_int = structseqfield(9) - # skip to higher numbers for fields not part of the sequence. - # the numbers are only used to ordering - st_rdev = structseqfield(50, "device type (if inode device)") - st_atime = structseqfield(57, default=lambda self: self._st_atime_as_int) - st_mtime = structseqfield(58, default=lambda self: self._st_mtime_as_int) - st_ctime = structseqfield(59, default=lambda self: self._st_ctime_as_int) + spaceconfig = dict(usemodules=('binascii', 'struct',)) + def setup_class(cls): + cls.w__structseq = import_lib_pypy(cls.space, '_structseq') -def test_class(): - assert mydata.st_mode.__doc__ == "protection bits" - assert mydata.n_fields == 14 - assert mydata.n_sequence_fields == 10 - assert mydata.n_unnamed_fields == 0 + def w_get_mydata(self): + _structseq = self._structseq + ssfield = _structseq.structseqfield + class mydata: + __metaclass__ = _structseq.structseqtype -def test_mydata(): - x = mydata(range(100, 111)) - assert x.n_sequence_fields == type(x).n_sequence_fields == 10 - assert x.n_fields == type(x).n_fields == 14 - assert x.st_mode == 100 - assert x.st_size == 106 - assert x.st_ctime == 109 # copied by the default=lambda... - assert x.st_rdev == 110 - assert len(x) == 10 - assert list(x) == range(100, 110) - assert x + (5,) == tuple(range(100, 110)) + (5,) - assert x[4:12:2] == (104, 106, 108) - assert 104 in x - assert 110 not in x + st_mode = ssfield(0, "protection bits") + st_ino = ssfield(1) + st_dev = ssfield(2) + st_nlink = ssfield(3) + st_uid = ssfield(4) + st_gid = ssfield(5) + st_size = ssfield(6) + _st_atime_as_int = ssfield(7) + _st_mtime_as_int = ssfield(8) + _st_ctime_as_int = ssfield(9) + # skip to higher numbers for fields not part of the sequence. 
+ # the numbers are only used to ordering + st_rdev = ssfield(50, "device type (if inode device)") + st_atime = ssfield(57, + default=lambda self: self._st_atime_as_int) + st_mtime = ssfield(58, + default=lambda self: self._st_mtime_as_int) + st_ctime = ssfield(59, + default=lambda self: self._st_ctime_as_int) + return mydata -def test_default_None(): - x = mydata(range(100, 110)) - assert x.st_rdev is None + def test_class(self): + mydata = self.get_mydata() + assert mydata.st_mode.__doc__ == "protection bits" + assert mydata.n_fields == 14 + assert mydata.n_sequence_fields == 10 + assert mydata.n_unnamed_fields == 0 -def test_constructor(): - x = mydata(range(100, 111), {'st_mtime': 12.25}) - assert x[8] == 108 - assert x.st_mtime == 12.25 + def test_mydata(self): + mydata = self.get_mydata() + x = mydata(range(100, 111)) + assert x.n_sequence_fields == type(x).n_sequence_fields == 10 + assert x.n_fields == type(x).n_fields == 14 + assert x.st_mode == 100 + assert x.st_size == 106 + assert x.st_ctime == 109 # copied by the default=lambda... + assert x.st_rdev == 110 + assert len(x) == 10 + assert list(x) == range(100, 110) + assert x + (5,) == tuple(range(100, 110)) + (5,) + assert x[4:12:2] == (104, 106, 108) + assert 104 in x + assert 110 not in x -def test_compare_like_tuple(): - x = mydata(range(100, 111)) - y = mydata(range(100, 110) + [555]) - assert x == tuple(range(100, 110)) - assert x == y # blame CPython - assert hash(x) == hash(y) == hash(tuple(range(100, 110))) + def test_default_None(self): + mydata = self.get_mydata() + x = mydata(range(100, 110)) + assert x.st_rdev is None -def test_pickle(): - import pickle - x = mydata(range(100, 111)) - s = pickle.dumps(x) - y = pickle.loads(s) - assert x == y - assert x.st_rdev == y.st_rdev == 110 + def test_constructor(self): + mydata = self.get_mydata() + x = mydata(range(100, 111), {'st_mtime': 12.25}) + assert x[8] == 108 + assert x.st_mtime == 12.25 -def test_readonly(): - x = mydata(range(100, 113)) - py.test.raises((TypeError, AttributeError), "x.st_mode = 1") - py.test.raises((TypeError, AttributeError), "x.st_mtime = 1") - py.test.raises((TypeError, AttributeError), "x.st_rdev = 1") + def test_compare_like_tuple(self): + mydata = self.get_mydata() + x = mydata(range(100, 111)) + y = mydata(range(100, 110) + [555]) + assert x == tuple(range(100, 110)) + assert x == y # blame CPython + assert hash(x) == hash(y) == hash(tuple(range(100, 110))) -def test_no_extra_assignments(): - x = mydata(range(100, 113)) - py.test.raises((TypeError, AttributeError), "x.some_random_attribute = 1") + def test_pickle(self): + import pickle + import sys + import types + sys.modules['mod'] = mod = types.ModuleType('mod') + try: + mod.mydata = mydata = self.get_mydata() + mydata.__module__ = 'mod' + x = mydata(range(100, 111)) + s = pickle.dumps(x) + y = pickle.loads(s) + assert x == y + assert x.st_rdev == y.st_rdev == 110 + finally: + del sys.modules['mod'] + + def test_readonly(self): + mydata = self.get_mydata() + x = mydata(range(100, 113)) + raises((TypeError, AttributeError), "x.st_mode = 1") + raises((TypeError, AttributeError), "x.st_mtime = 1") + raises((TypeError, AttributeError), "x.st_rdev = 1") + + def test_no_extra_assignments(self): + mydata = self.get_mydata() + x = mydata(range(100, 113)) + raises((TypeError, AttributeError), "x.some_random_attribute = 1") From noreply at buildbot.pypy.org Sat Jul 20 02:21:15 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 02:21:15 +0200 (CEST) Subject: [pypy-commit] pypy 
py3k: merge default Message-ID: <20130720002115.044241C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65491:ffec7324af3c Date: 2013-07-19 16:25 -0700 http://bitbucket.org/pypy/pypy/changeset/ffec7324af3c/ Log: merge default diff too long, truncating to 2000 out of 2080 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -75,6 +76,15 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0](baseargs[1]) + except GreenletExit as e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise value.with_traceback(tb) + except GreenletExit as e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. _`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. 
The hard to understand part diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,18 @@ .. branch: flowoperators Simplify rpython/flowspace/ code by using more metaprogramming. Create SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. + +.. branch: ssl_moving_write_buffer diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -195,6 +195,11 @@ print ("Python", sys.version, file=sys.stderr) raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -373,6 +378,7 @@ '--version': (print_version, None), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,20 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -66,6 +80,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,39 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return 
space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,104 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxint*2+1) + if n > sys.maxint: + n -= 2*(sys.maxint+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxint) == sys.maxint + assert self.intmask(sys.maxint+1) == -sys.maxint-1 + assert self.intmask(-sys.maxint-2) == sys.maxint + N = 2 ** 128 + assert self.intmask(N+sys.maxint) == sys.maxint + assert self.intmask(N+sys.maxint+1) == -sys.maxint-1 + assert self.intmask(N-sys.maxint-2) == sys.maxint + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 + assert intop.int_add(-2, -sys.maxint) == sys.maxint + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 + assert intop.int_sub(-2, sys.maxint) == sys.maxint + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxint, -sys.maxint) == ( + self.intmask(sys.maxint ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint + assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxint, -1) == 0 + assert intop.int_mod(sys.maxint, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxint-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxint-1 + assert intop.int_lshift(sys.maxint // 3, 2) == ( + self.intmask((sys.maxint // 3) << 2)) + assert intop.int_lshift(-sys.maxint // 3, 2) == ( + self.intmask((-sys.maxint // 3) << 2)) + + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + + def 
test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxint == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) == 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxint + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") @@ -2760,6 +2832,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() 
is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1100,6 +1100,13 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + def test_char_array_int(self): import _rawffi A = _rawffi.Array('c') diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -857,7 +857,10 @@ ss.ssl = libssl_SSL_new(ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. + libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) if server_hostname: libssl_SSL_set_tlsext_host_name(ss.ssl, server_hostname); diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -35,6 +35,7 @@ 'error' : 'app_posix.error', 'stat_result': 'app_posix.stat_result', 'urandom': 'app_posix.urandom', + 'statvfs_result': 'app_posix.statvfs_result', } if os.name == 'nt': del appleveldefs['urandom'] # at interp on win32 diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -63,6 +63,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. 
So we call fstat(), letting diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) for year in [-100, 100, 1000, 2000, 10000]: try: testval = rctime.mktime((year, 1, 10) + (0,)*6) diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/support.py @@ -0,0 +1,34 @@ +import py + +from pypy.conftest import option +from pypy.interpreter.error import OperationError + +def import_lib_pypy(space, name, skipmsg=None): + """Import a top level module ensuring it's sourced from the lib_pypy + package. + + Raises a pytest Skip on ImportError if a skip message was specified. + """ + if option.runappdirect: + try: + mod = __import__('lib_pypy.' + name) + except ImportError as e: + if skipmsg is not None: + py.test.skip('%s (%s))' % (skipmsg, str(e))) + raise + return getattr(mod, name) + + try: + # Assume app-level import finds it from the right place (we + # assert so afterwards). It should as long as a builtin module + # overshadows it + w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) + except OperationError as e: + if skipmsg is not None or not e.match(space, space.w_ImportError): + raise + py.test.skip('%s (%s))' % (skipmsg, str(e))) + w_file = space.getattr(w_mod, space.wrap('__file__')) + assert space.is_true(space.contains(w_file, space.wrap('lib_pypy'))), \ + ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" % name) + return w_mod diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -341,3 +341,40 @@ assert main.switch(3, x=5) == ((3,), {'x': 5}) assert main.switch(3, x=5, y=6) == ((3,), {'x': 5, 'y': 6}) assert main.switch(2, 3, x=6) == ((2, 3), {'x': 6}) + + def test_throw_GreenletExit_not_started(self): + import greenlet + def f(): + never_executed + g = greenlet.greenlet(f) + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e + + def test_throw_GreenletExit_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e + + def test_throw_exception_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + seen = [] + class MyException(Exception): + def __init__(self): + seen.append(1) + try: + g.throw(MyException) + except MyException: + pass + else: + raise AssertionError("no exception??") + assert seen == [1] diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -1,25 +1,32 @@ -import py -try: - from lib_pypy import grp -except ImportError: - py.test.skip("No grp module on this platform") +from pypy.module.test_lib_pypy.support import import_lib_pypy -def test_basic(): - g = grp.getgrnam("root") - assert g.gr_gid == 0 - assert g.gr_mem == ['root'] or g.gr_mem == [] - assert g.gr_name == 'root' - assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) -def test_extra(): - 
py.test.raises(TypeError, grp.getgrnam, False) - py.test.raises(TypeError, grp.getgrnam, None) +class AppTestGrp: -def test_struct_group(): - g = grp.struct_group((10, 20, 30, 40)) - assert len(g) == 4 - assert list(g) == [10, 20, 30, 40] - assert g.gr_name == 10 - assert g.gr_passwd == 20 - assert g.gr_gid == 30 - assert g.gr_mem == 40 + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + cls.w_grp = import_lib_pypy(cls.space, 'grp', + "No grp module on this platform") + + def test_basic(self): + g = self.grp.getgrnam("root") + assert g.gr_gid == 0 + assert g.gr_mem == ['root'] or g.gr_mem == [] + assert g.gr_name == 'root' + assert isinstance(g.gr_passwd, str) # usually just 'x', don't hope :-) + + def test_extra(self): + grp = self.grp + print(grp.__file__) + raises(TypeError, grp.getgrnam, False) + raises(TypeError, grp.getgrnam, None) + + def test_struct_group(self): + g = self.grp.struct_group((10, 20, 30, 40)) + assert len(g) == 4 + assert list(g) == [10, 20, 30, 40] + assert g.gr_name == 10 + assert g.gr_passwd == 20 + assert g.gr_gid == 30 + assert g.gr_mem == 40 diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,18 +1,29 @@ # Generates the resource cache (it might be there already, but maybe not) from __future__ import absolute_import -from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('resource.ctc.py') - import os -if hasattr(os, 'wait3'): - from lib_pypy._pypy_wait import wait3 - def test_os_wait3(): +import py + +from lib_pypy.ctypes_config_cache import rebuild +from pypy.module.test_lib_pypy.support import import_lib_pypy + + +class AppTestOsWait: + + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + if not hasattr(os, "fork"): + py.test.skip("Need fork() to test wait3/wait4()") + rebuild.rebuild_one('resource.ctc.py') + cls.w__pypy_wait = import_lib_pypy( + cls.space, '_pypy_wait', + '_pypy_wait not supported on this platform') + + def test_os_wait3(self): + import os + wait3 = self._pypy_wait.wait3 exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait3()") - child = os.fork() if child == 0: # in child os._exit(exit_status) @@ -24,14 +35,10 @@ assert isinstance(rusage.ru_utime, float) assert isinstance(rusage.ru_maxrss, int) -if hasattr(os, 'wait4'): - from lib_pypy._pypy_wait import wait4 - def test_os_wait4(): + def test_os_wait4(self): + import os + wait4 = self._pypy_wait.wait4 exit_status = 0x33 - - if not hasattr(os, "fork"): - skip("Need fork() to test wait4()") - child = os.fork() if child == 0: # in child os._exit(exit_status) diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -1,35 +1,41 @@ -import py -try: - from lib_pypy import resource -except ImportError: - py.test.skip('no resource module available') +from __future__ import absolute_import from lib_pypy.ctypes_config_cache import rebuild -rebuild.rebuild_one('resource.ctc.py') +from pypy.module.test_lib_pypy.support import import_lib_pypy -def test_resource(): - x = resource.getrusage(resource.RUSAGE_SELF) - assert len(x) == 16 - assert x[0] == x[-16] == x.ru_utime - assert x[1] == x[-15] == x.ru_stime - assert x[2] == x[-14] == x.ru_maxrss - assert x[3] == x[-13] == x.ru_ixrss - 
assert x[4] == x[-12] == x.ru_idrss - assert x[5] == x[-11] == x.ru_isrss - assert x[6] == x[-10] == x.ru_minflt - assert x[7] == x[-9] == x.ru_majflt - assert x[8] == x[-8] == x.ru_nswap - assert x[9] == x[-7] == x.ru_inblock - assert x[10] == x[-6] == x.ru_oublock - assert x[11] == x[-5] == x.ru_msgsnd - assert x[12] == x[-4] == x.ru_msgrcv - assert x[13] == x[-3] == x.ru_nsignals - assert x[14] == x[-2] == x.ru_nvcsw - assert x[15] == x[-1] == x.ru_nivcsw - for i in range(16): - if i < 2: - expected_type = float - else: - expected_type = int - assert isinstance(x[i], expected_type) +class AppTestResource: + + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + + def setup_class(cls): + rebuild.rebuild_one('resource.ctc.py') + cls.w_resource = import_lib_pypy(cls.space, 'resource', + 'No resource module available') + + def test_resource(self): + resource = self.resource + x = resource.getrusage(resource.RUSAGE_SELF) + assert len(x) == 16 + assert x[0] == x[-16] == x.ru_utime + assert x[1] == x[-15] == x.ru_stime + assert x[2] == x[-14] == x.ru_maxrss + assert x[3] == x[-13] == x.ru_ixrss + assert x[4] == x[-12] == x.ru_idrss + assert x[5] == x[-11] == x.ru_isrss + assert x[6] == x[-10] == x.ru_minflt + assert x[7] == x[-9] == x.ru_majflt + assert x[8] == x[-8] == x.ru_nswap + assert x[9] == x[-7] == x.ru_inblock + assert x[10] == x[-6] == x.ru_oublock + assert x[11] == x[-5] == x.ru_msgsnd + assert x[12] == x[-4] == x.ru_msgrcv + assert x[13] == x[-3] == x.ru_nsignals + assert x[14] == x[-2] == x.ru_nvcsw + assert x[15] == x[-1] == x.ru_nivcsw + for i in range(16): + if i < 2: + expected_type = float + else: + expected_type = (int, long) + assert isinstance(x[i], expected_type) diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -1,13 +1,21 @@ -# Testing sha module (NIST's Secure Hash Algorithm) +"""Testing sha module (NIST's Secure Hash Algorithm) -# use the three examples from Federal Information Processing Standards -# Publication 180-1, Secure Hash Standard, 1995 April 17 -# http://www.itl.nist.gov/div897/pubs/fip180-1.htm -from lib_pypy import _sha as pysha +use the three examples from Federal Information Processing Standards +Publication 180-1, Secure Hash Standard, 1995 April 17 +http://www.itl.nist.gov/div897/pubs/fip180-1.htm +""" +from pypy.module.test_lib_pypy.support import import_lib_pypy -class TestSHA: - def check(self, data, digest): - computed = pysha.new(data).hexdigest() + +class AppTestSHA: + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha = import_lib_pypy(cls.space, '_sha') + + def w_check(self, data, digest): + computed = self._sha.new(data).hexdigest() assert computed == digest def test_case_1(self): @@ -22,10 +30,10 @@ self.check("a" * 1000000, "34aa973cd4c4daa4f61eeb2bdbad27316534016f") - -def test_attributes(): - assert pysha.digest_size == 20 - assert pysha.digestsize == 20 - assert pysha.blocksize == 1 - assert pysha.new().digest_size == 20 - assert pysha.new().digestsize == 20 + def test_attributes(self): + _sha = self._sha + assert _sha.digest_size == 20 + assert _sha.digestsize == 20 + assert _sha.blocksize == 1 + assert _sha.new().digest_size == 20 + assert _sha.new().digestsize == 20 diff --git a/pypy/module/test_lib_pypy/test_structseq.py b/pypy/module/test_lib_pypy/test_structseq.py --- a/pypy/module/test_lib_pypy/test_structseq.py +++ 
b/pypy/module/test_lib_pypy/test_structseq.py @@ -1,77 +1,106 @@ -import py -from lib_pypy._structseq import structseqfield, structseqtype +from pypy.module.test_lib_pypy.support import import_lib_pypy -class mydata(metaclass=structseqtype): - st_mode = structseqfield(0, "protection bits") - st_ino = structseqfield(1) - st_dev = structseqfield(2) - st_nlink = structseqfield(3) - st_uid = structseqfield(4) - st_gid = structseqfield(5) - st_size = structseqfield(6) - _st_atime_as_int = structseqfield(7) - _st_mtime_as_int = structseqfield(8) - _st_ctime_as_int = structseqfield(9) - # skip to higher numbers for fields not part of the sequence. - # the numbers are only used to ordering - st_rdev = structseqfield(50, "device type (if inode device)") - st_atime = structseqfield(57, default=lambda self: self._st_atime_as_int) - st_mtime = structseqfield(58, default=lambda self: self._st_mtime_as_int) - st_ctime = structseqfield(59, default=lambda self: self._st_ctime_as_int) +class AppTestStructseq: + spaceconfig = dict(usemodules=('binascii', 'struct',)) -def test_class(): - assert mydata.st_mode.__doc__ == "protection bits" - assert mydata.n_fields == 14 - assert mydata.n_sequence_fields == 10 - assert mydata.n_unnamed_fields == 0 + def setup_class(cls): + cls.w__structseq = import_lib_pypy(cls.space, '_structseq') -def test_mydata(): - x = mydata(list(range(100, 111))) - assert x.n_sequence_fields == type(x).n_sequence_fields == 10 - assert x.n_fields == type(x).n_fields == 14 - assert x.st_mode == 100 - assert x.st_size == 106 - assert x.st_ctime == 109 # copied by the default=lambda... - assert x.st_rdev == 110 - assert len(x) == 10 - assert list(x) == list(range(100, 110)) - assert x + (5,) == tuple(range(100, 110)) + (5,) - assert x[4:12:2] == (104, 106, 108) - assert 104 in x - assert 110 not in x + def w_get_mydata(self): + _structseq = self._structseq + ssfield = _structseq.structseqfield + class mydata: + __metaclass__ = _structseq.structseqtype -def test_default_None(): - x = mydata(list(range(100, 110))) - assert x.st_rdev is None + st_mode = ssfield(0, "protection bits") + st_ino = ssfield(1) + st_dev = ssfield(2) + st_nlink = ssfield(3) + st_uid = ssfield(4) + st_gid = ssfield(5) + st_size = ssfield(6) + _st_atime_as_int = ssfield(7) + _st_mtime_as_int = ssfield(8) + _st_ctime_as_int = ssfield(9) + # skip to higher numbers for fields not part of the sequence. 
+ # the numbers are only used to ordering + st_rdev = ssfield(50, "device type (if inode device)") + st_atime = ssfield(57, + default=lambda self: self._st_atime_as_int) + st_mtime = ssfield(58, + default=lambda self: self._st_mtime_as_int) + st_ctime = ssfield(59, + default=lambda self: self._st_ctime_as_int) + return mydata -def test_constructor(): - x = mydata(list(range(100, 111)), {'st_mtime': 12.25}) - assert x[8] == 108 - assert x.st_mtime == 12.25 + def test_class(self): + mydata = self.get_mydata() + assert mydata.st_mode.__doc__ == "protection bits" + assert mydata.n_fields == 14 + assert mydata.n_sequence_fields == 10 + assert mydata.n_unnamed_fields == 0 -def test_compare_like_tuple(): - x = mydata(list(range(100, 111))) - y = mydata(list(range(100, 110)) + [555]) - assert x == tuple(range(100, 110)) - assert x == y # blame CPython - assert hash(x) == hash(y) == hash(tuple(range(100, 110))) + def test_mydata(self): + mydata = self.get_mydata() + x = mydata(range(100, 111)) + assert x.n_sequence_fields == type(x).n_sequence_fields == 10 + assert x.n_fields == type(x).n_fields == 14 + assert x.st_mode == 100 + assert x.st_size == 106 + assert x.st_ctime == 109 # copied by the default=lambda... + assert x.st_rdev == 110 + assert len(x) == 10 + assert list(x) == range(100, 110) + assert x + (5,) == tuple(range(100, 110)) + (5,) + assert x[4:12:2] == (104, 106, 108) + assert 104 in x + assert 110 not in x -def test_pickle(): - import pickle - x = mydata(list(range(100, 111))) - s = pickle.dumps(x) - y = pickle.loads(s) - assert x == y - assert x.st_rdev == y.st_rdev == 110 + def test_default_None(self): + mydata = self.get_mydata() + x = mydata(range(100, 110)) + assert x.st_rdev is None -def test_readonly(): - x = mydata(list(range(100, 113))) - py.test.raises((TypeError, AttributeError), "x.st_mode = 1") - py.test.raises((TypeError, AttributeError), "x.st_mtime = 1") - py.test.raises((TypeError, AttributeError), "x.st_rdev = 1") + def test_constructor(self): + mydata = self.get_mydata() + x = mydata(range(100, 111), {'st_mtime': 12.25}) + assert x[8] == 108 + assert x.st_mtime == 12.25 -def test_no_extra_assignments(): - x = mydata(list(range(100, 113))) - py.test.raises((TypeError, AttributeError), "x.some_random_attribute = 1") + def test_compare_like_tuple(self): + mydata = self.get_mydata() + x = mydata(range(100, 111)) + y = mydata(range(100, 110) + [555]) + assert x == tuple(range(100, 110)) + assert x == y # blame CPython + assert hash(x) == hash(y) == hash(tuple(range(100, 110))) + + def test_pickle(self): + import pickle + import sys + import types + sys.modules['mod'] = mod = types.ModuleType('mod') + try: + mod.mydata = mydata = self.get_mydata() + mydata.__module__ = 'mod' + x = mydata(range(100, 111)) + s = pickle.dumps(x) + y = pickle.loads(s) + assert x == y + assert x.st_rdev == y.st_rdev == 110 + finally: + del sys.modules['mod'] + + def test_readonly(self): + mydata = self.get_mydata() + x = mydata(range(100, 113)) + raises((TypeError, AttributeError), "x.st_mode = 1") + raises((TypeError, AttributeError), "x.st_mtime = 1") + raises((TypeError, AttributeError), "x.st_rdev = 1") + + def test_no_extra_assignments(self): + mydata = self.get_mydata() + x = mydata(range(100, 113)) + raises((TypeError, AttributeError), "x.some_random_attribute = 1") diff --git a/pypy/module/test_lib_pypy/test_tputil.py b/pypy/module/test_lib_pypy/test_tputil.py --- a/pypy/module/test_lib_pypy/test_tputil.py +++ b/pypy/module/test_lib_pypy/test_tputil.py @@ -39,7 +39,7 @@ l = [] def 
f(*args): - print args + print(args) tp = make_proxy(f, type=A) #tp.__getslice__(0, 1) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -119,10 +119,7 @@ return W_ComplexObject(w_int.intval, 0.0) def delegate_Long2Complex(space, w_long): - try: - dval = w_long.tofloat() - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + dval = w_long.tofloat(space) return W_ComplexObject(dval, 0.0) def delegate_Float2Complex(space, w_float): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -970,15 +970,18 @@ def update1(space, w_dict, w_data): - if space.findattr(w_data, space.wrap("keys")) is None: + if isinstance(w_data, W_DictMultiObject): # optimization case only + update1_dict_dict(space, w_dict, w_data) + return + w_method = space.findattr(w_data, space.wrap("keys")) + if w_method is None: # no 'keys' method, so we assume it is a sequence of pairs - update1_pairs(space, w_dict, w_data) + data_w = space.listview(w_data) + update1_pairs(space, w_dict, data_w) else: - if isinstance(w_data, W_DictMultiObject): # optimization case only - update1_dict_dict(space, w_dict, w_data) - else: - # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" - update1_keys(space, w_dict, w_data) + # general case -- "for k in o.keys(): dict.__setitem__(d, k, o[k])" + data_w = space.listview(space.call_function(w_method)) + update1_keys(space, w_dict, w_data, data_w) @jit.look_inside_iff(lambda space, w_dict, w_data: @@ -992,8 +995,8 @@ w_dict.setitem(w_key, w_value) -def update1_pairs(space, w_dict, w_data): - for w_pair in space.listview(w_data): +def update1_pairs(space, w_dict, data_w): + for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: raise OperationError(space.w_ValueError, @@ -1002,9 +1005,8 @@ w_dict.setitem(w_key, w_value) -def update1_keys(space, w_dict, w_data): - w_keys = space.call_method(w_data, "keys") - for w_key in space.listview(w_keys): +def update1_keys(space, w_dict, w_data, data_w): + for w_key in data_w: w_value = space.getitem(w_data, w_key) w_dict.setitem(w_key, w_value) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -66,11 +66,7 @@ # long-to-float delegation def delegate_Long2Float(space, w_longobj): - try: - return W_FloatObject(w_longobj.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return W_FloatObject(w_longobj.tofloat(space)) # float__Float is supposed to do nothing, unless it has diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -32,8 +32,12 @@ def unwrap(w_self, space): #YYYYYY return w_self.longval() - def tofloat(self): - return self.num.tofloat() + def tofloat(self, space): + try: + return self.num.tofloat() + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("long int too large to convert to float")) def toint(self): return self.num.toint() @@ -72,7 +76,7 @@ return w_self.num def float_w(self, space): - return self.num.tofloat() + return self.tofloat(space) def int(self, space): if (type(self) is not W_LongObject and @@ 
-137,11 +141,7 @@ return long_long(space, w_value) def float__Long(space, w_longobj): - try: - return space.newfloat(w_longobj.num.tofloat()) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("long int too large to convert to float")) + return space.newfloat(w_longobj.tofloat(space)) def repr__Long(space, w_long): return space.wrap(w_long.num.str()) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -175,8 +175,7 @@ if not space.isinstance_w(w_other, space.w_set): return space.w_NotImplemented - # XXX there is no test_buildinshortcut.py - # tested in test_buildinshortcut.py + # tested in test_builtinshortcut.py # XXX do not make new setobject here w_other_as_set = self._newobj(space, w_other) return space.wrap(self.equals(w_other_as_set)) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -370,6 +370,16 @@ d.update({'foo': 'bar'}, baz=1) assert d == {'foo': 'bar', 'baz': 1} + def test_update_keys_method(self): + class Foo(object): + def keys(self): + return [4, 1] + def __getitem__(self, key): + return key * 10 + d = {} + d.update(Foo()) + assert d == {1: 10, 4: 40} + def test_values(self): d = {1: 2, 3: 4} vals = list(d.values()) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ b/pypy/objspace/std/test/test_longobject.py @@ -19,6 +19,12 @@ w_obj = fromlong(42) assert space.unwrap(w_obj) == 42 + def test_overflow_error(self): + space = self.space + fromlong = lobj.W_LongObject.fromlong + w_big = fromlong(10**900) + space.raises_w(space.w_OverflowError, space.float_w, w_big) + def test_rint_variants(self): py.test.skip("XXX broken!") from rpython.rtyper.tool.rfficache import platform diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py --- a/pypy/tool/pytest/objspace.py +++ b/pypy/tool/pytest/objspace.py @@ -106,6 +106,9 @@ def is_true(self, obj): return bool(obj) + def is_none(self, obj): + return obj is None + def str_w(self, w_str): return w_str diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -740,7 +740,8 @@ s = a.build_types(f, [B]) assert s.classdef is a.bookkeeper.getuniqueclassdef(C) - def test_union_type_some_opbc(self): + def test_union_type_some_pbc(self): + py.test.skip("is there a point? 
f() can return self.__class__ instead") class A(object): name = "A" diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -23,7 +23,6 @@ supports_longlong = False # XXX requires an implementation of # read_timestamp that works in user mode supports_singlefloats = not detect_hardfloat() - can_inline_varsize_malloc = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE all_reg_indexes = range(len(all_regs)) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -21,8 +21,6 @@ class AbstractLLCPU(AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts - can_inline_varsize_malloc = False - def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): assert type(opts) is not bool diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -305,8 +305,6 @@ arraydescr, kind=FLAG_ARRAY): """ itemsize is an int, v_length and v_result are boxes """ - if not self.cpu.can_inline_varsize_malloc: - return False # temporary, kill when ARM supports it gc_descr = self.gc_ll_descr if (kind == FLAG_ARRAY and (arraydescr.basesize != gc_descr.standard_array_basesize or diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -109,8 +109,6 @@ class BaseFakeCPU(object): JITFRAME_FIXED_SIZE = 0 - can_inline_varsize_malloc = True - def __init__(self): self.tracker = FakeTracker() self._cache = {} diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -23,8 +23,6 @@ with_threads = False frame_reg = regloc.ebp - can_inline_varsize_malloc = True - from rpython.jit.backend.x86.arch import JITFRAME_FIXED_SIZE all_reg_indexes = gpr_reg_mgr_cls.all_reg_indexes gen_regs = gpr_reg_mgr_cls.all_regs diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -7,6 +7,7 @@ from rpython.rlib import rgc from rpython.rlib.jit import elidable, oopspec from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask +from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper import rlist from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.extregistry import ExtRegistryEntry @@ -272,10 +273,9 @@ return result def _ll_1_int_abs(x): - if x < 0: - return -x - else: - return x + # this version doesn't branch + mask = x >> (LONG_BIT - 1) + return (x ^ mask) - mask def _ll_1_cast_uint_to_float(x): # XXX on 32-bit platforms, this should be done using cast_longlong_to_float diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -13,6 +13,7 @@ self.ARGS = ARGS self.RESULT = RESULT self.effectinfo = effectinfo + def get_extra_info(self): return self.effectinfo @@ -37,7 +38,7 @@ class tracker: pass - + calldescrof 
= FakeCallDescr fielddescrof = FakeFieldDescr sizeof = FakeSizeDescr @@ -121,20 +122,32 @@ blackholeinterp.run() assert blackholeinterp.get_tmpreg_i() == 100+6+5+4+3 + def test_instantiate(): - class A1: id = 651 - class A2(A1): id = 652 - class B1: id = 661 - class B2(B1): id = 662 + class A1: + id = 651 + + class A2(A1): + id = 652 + + class B1: + id = 661 + + class B2(B1): + id = 662 + def dont_look(n): return n + 1 + + classes = [ + (A1, B1), + (A2, B2) + ] + def f(n): - if n > 5: - x, y = A1, B1 - else: - x, y = A2, B2 + x, y = classes[n] return x().id + y().id + dont_look(n) - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [0]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(FakePolicy()) @@ -149,16 +162,10 @@ else: assert 0, "missing instantiate_*_%s in:\n%r" % (expected, names) - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'B1' in names - assert 'A2' in names - assert 'B2' in names + names = set([value for key, value in cw.assembler.list_of_addr2name]) assert 'dont_look' in names + def test_instantiate_with_unreasonable_attr(): # It is possible to have in real code the instantiate() function for # a class be dont-look-inside. This is caused by the code that @@ -169,17 +176,19 @@ name = graph.name return not (name.startswith('instantiate_') and name.endswith('A2')) + class A1: pass + class A2(A1): pass + + classes = [A1, A2] + def f(n): - if n > 5: - x = A1 - else: - x = A2 + x = classes[n] x() - rtyper = support.annotate(f, [35]) + rtyper = support.annotate(f, [1]) maingraph = rtyper.annotator.translator.graphs[0] cw = CodeWriter(FakeCPU(rtyper), [FakeJitDriverSD(maingraph)]) cw.find_all_graphs(MyFakePolicy()) @@ -188,12 +197,7 @@ names = [jitcode.name for jitcode in cw.assembler.indirectcalltargets] assert len(names) == 1 assert names[0].startswith('instantiate_') and names[0].endswith('A1') - # - print cw.assembler.list_of_addr2name - names = dict.fromkeys([value - for key, value in cw.assembler.list_of_addr2name]) - assert 'A1' in names - assert 'A2' in names + def test_int_abs(): def f(n): @@ -209,7 +213,7 @@ def test_raw_malloc_and_access(): TP = rffi.CArray(lltype.Signed) - + def f(n): a = lltype.malloc(TP, n, flavor='raw') a[0] = n diff --git a/rpython/jit/codewriter/test/test_effectinfo.py b/rpython/jit/codewriter/test/test_effectinfo.py --- a/rpython/jit/codewriter/test/test_effectinfo.py +++ b/rpython/jit/codewriter/test/test_effectinfo.py @@ -1,15 +1,22 @@ +import pytest + +from rpython.jit.codewriter.effectinfo import (effectinfo_from_writeanalyze, + EffectInfo, VirtualizableAnalyzer) +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.rclass import OBJECT -from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.ootypesystem import ootype -from rpython.jit.codewriter.effectinfo import effectinfo_from_writeanalyze,\ - EffectInfo +from rpython.translator.translator import TranslationContext, graphof -class FakeCPU: + +class FakeCPU(object): def fielddescrof(self, T, fieldname): return ('fielddescr', T, fieldname) + def arraydescrof(self, A): return ('arraydescr', A) + def test_no_oopspec_duplicate(): # check that all the various EffectInfo.OS_* have unique values oopspecs = set() @@ -18,6 +25,7 @@ assert value not in oopspecs oopspecs.add(value) + def test_include_read_field(): S = 
lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a")]) @@ -26,6 +34,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("struct", lltype.Ptr(S), "a")]) @@ -34,6 +43,7 @@ assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_read_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A))]) @@ -43,6 +53,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_include_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("array", lltype.Ptr(A))]) @@ -51,6 +62,7 @@ assert not effectinfo.write_descrs_fields assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)] + def test_dont_include_read_and_write_field(): S = lltype.GcStruct("S", ("a", lltype.Signed)) effects = frozenset([("readstruct", lltype.Ptr(S), "a"), @@ -60,6 +72,7 @@ assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")] assert not effectinfo.write_descrs_arrays + def test_dont_include_read_and_write_array(): A = lltype.GcArray(lltype.Signed) effects = frozenset([("readarray", lltype.Ptr(A)), @@ -78,6 +91,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_array_of_void(): effects = frozenset([("array", lltype.Ptr(lltype.GcArray(lltype.Void)))]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -85,6 +99,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_struct_with_void(): effects = frozenset([("struct", lltype.Ptr(lltype.GcStruct("x", ("a", lltype.Void))), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -92,6 +107,7 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_ooarray_of_void(): effects = frozenset([("array", ootype.Array(ootype.Void))]) effectinfo = effectinfo_from_writeanalyze(effects, None) @@ -99,9 +115,43 @@ assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + def test_filter_out_instance_with_void(): effects = frozenset([("struct", ootype.Instance("x", ootype.ROOT, {"a": ootype.Void}), "a")]) effectinfo = effectinfo_from_writeanalyze(effects, None) assert not effectinfo.readonly_descrs_fields assert not effectinfo.write_descrs_fields assert not effectinfo.write_descrs_arrays + + +class TestVirtualizableAnalyzer(object): + def analyze(self, func, sig): + t = TranslationContext() + t.buildannotator().build_types(func, sig) + t.buildrtyper().specialize() + fgraph = graphof(t, func) + return VirtualizableAnalyzer(t).analyze(fgraph.startblock.operations[0]) + + def test_constructor(self): + class A(object): + x = 1 + + class B(A): + x = 2 + + @jit.elidable + def g(cls): + return cls() + + def f(x): + if x: + cls = A + else: + cls = B + return g(cls).x + + def entry(x): + return f(x) + + res = self.analyze(entry, [int]) + assert not res diff --git a/rpython/jit/codewriter/test/test_support.py b/rpython/jit/codewriter/test/test_support.py --- a/rpython/jit/codewriter/test/test_support.py +++ b/rpython/jit/codewriter/test/test_support.py @@ -1,8 +1,9 @@ -import py +import py, sys from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import llstr from rpython.flowspace.model import Variable, Constant, SpaceOperation 
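The new _ll_1_int_abs in codewriter/support.py above computes an absolute value without a branch: the arithmetic right shift turns the sign bit into an all-ones (or all-zeros) mask. A quick plain-Python check of the same trick, which the test_int_abs cases added below also exercise (LONG_BIT = 64 is an assumption of this sketch; RPython derives it from the platform):

    import sys

    LONG_BIT = 64                      # assumed word size for this sketch

    def int_abs(x):
        mask = x >> (LONG_BIT - 1)     # -1 for negative x, 0 otherwise
        return (x ^ mask) - mask       # ~x + 1 == -x when mask is -1

    for n in (0, 1, 10, sys.maxsize, -1, -10, -sys.maxsize):
        assert int_abs(n) == abs(n)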
from rpython.jit.codewriter.support import decode_builtin_call, LLtypeHelpers +from rpython.jit.codewriter.support import _ll_1_int_abs def newconst(x): return Constant(x, lltype.typeOf(x)) @@ -133,3 +134,12 @@ py.test.raises(IndexError, func, p1, llstr("w")) py.test.raises(AttributeError, func, p1, llstr(None)) py.test.raises(AttributeError, func, llstr(None), p2) + +def test_int_abs(): + assert _ll_1_int_abs(0) == 0 + assert _ll_1_int_abs(1) == 1 + assert _ll_1_int_abs(10) == 10 + assert _ll_1_int_abs(sys.maxint) == sys.maxint + assert _ll_1_int_abs(-1) == 1 + assert _ll_1_int_abs(-10) == 10 + assert _ll_1_int_abs(-sys.maxint) == sys.maxint diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -451,11 +451,19 @@ @jit.elidable def repr(self): - return self.format(BASE10, suffix="L") + try: + x = self.toint() + except OverflowError: + return self.format(BASE10, suffix="L") + return str(x) + "L" @jit.elidable def str(self): - return self.format(BASE10) + try: + x = self.toint() + except OverflowError: + return self.format(BASE10) + return str(x) @jit.elidable def eq(self, other): @@ -2047,9 +2055,38 @@ # hint for the annotator for the slice below) return ''.join(result[next_char_index:]) -_FORMAT_MINDIGITS = 5 # 36 ** 5 fits in 32 bits, there may be a better choice for this -def _format_int(val, digits): +class _PartsCache(object): + def __init__(self): + # 36 - 3, because bases 0, 1 make no sense + # and 2 is handled differently + self.parts_cache = [None] * 34 + self.mindigits = [0] * 34 + + for i in range(34): + base = i + 3 + mindigits = 1 + while base ** mindigits < sys.maxint: + mindigits += 1 + mindigits -= 1 + self.mindigits[i] = mindigits + + def get_cached_parts(self, base): + index = base - 3 + res = self.parts_cache[index] + if res is None: + rbase = rbigint.fromint(base) + part = rbase.pow(rbigint.fromint(self.mindigits[index])) + res = [part] + self.parts_cache[base - 3] = res + return res + + def get_mindigits(self, base): + return self.mindigits[base - 3] + +_parts_cache = _PartsCache() + +def _format_int_general(val, digits): base = len(digits) out = [] while val: @@ -2058,26 +2095,27 @@ out.reverse() return "".join(out) +def _format_int10(val, digits): + return str(val) -def _format_recursive(x, i, output, pts, digits, size_prefix): + at specialize.arg(7) +def _format_recursive(x, i, output, pts, digits, size_prefix, mindigits, _format_int): # bottomed out with min_digit sized pieces # use str of ints if i < 0: # this checks whether any digit has been appended yet if output.getlength() == size_prefix: - if x.sign == 0: - pass - else: + if x.sign != 0: s = _format_int(x.toint(), digits) output.append(s) else: s = _format_int(x.toint(), digits) - output.append_multiple_char(digits[0], _FORMAT_MINDIGITS - len(s)) + output.append_multiple_char(digits[0], mindigits - len(s)) output.append(s) else: top, bot = x.divmod(pts[i]) # split the number - _format_recursive(top, i-1, output, pts, digits, size_prefix) - _format_recursive(bot, i-1, output, pts, digits, size_prefix) + _format_recursive(top, i-1, output, pts, digits, size_prefix, mindigits, _format_int) + _format_recursive(bot, i-1, output, pts, digits, size_prefix, mindigits, _format_int) def _format(x, digits, prefix='', suffix=''): if x.sign == 0: @@ -2092,22 +2130,42 @@ rbase = rbigint.fromint(base) two = rbigint.fromint(2) - pts = [rbase.pow(rbigint.fromint(_FORMAT_MINDIGITS))] - stringsize = _FORMAT_MINDIGITS - while pts[-1].lt(x): - 
pts.append(pts[-1].pow(two)) - stringsize *= 2 - pts.pop() # remove first base**2**i greater than x + pts = _parts_cache.get_cached_parts(base) + mindigits = _parts_cache.get_mindigits(base) + stringsize = mindigits + startindex = 0 + for startindex, part in enumerate(pts): + if not part.lt(x): + break + stringsize *= 2 # XXX can this overflow on 32 bit? + else: + # not enough parts computed yet + while pts[-1].lt(x): + pts.append(pts[-1].pow(two)) + stringsize *= 2 + + startindex = len(pts) - 1 + + # remove first base**2**i greater than x + startindex -= 1 output = StringBuilder(stringsize) if negative: output.append('-') output.append(prefix) - _format_recursive(x, len(pts)-1, output, pts, digits, output.getlength()) + if digits == BASE10: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int10) + else: + _format_recursive( + x, startindex, output, pts, digits, output.getlength(), mindigits, + _format_int_general) output.append(suffix) return output.build() + def _bitwise(a, op, b): # '&', '|', '^' """ Bitwise and/or/xor operations """ diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -93,6 +93,7 @@ SSL_RECEIVED_SHUTDOWN = rffi_platform.ConstantInteger( "SSL_RECEIVED_SHUTDOWN") SSL_MODE_AUTO_RETRY = rffi_platform.ConstantInteger("SSL_MODE_AUTO_RETRY") + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER = rffi_platform.ConstantInteger("SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER") NID_subject_alt_name = rffi_platform.ConstantInteger("NID_subject_alt_name") GEN_DIRNAME = rffi_platform.ConstantInteger("GEN_DIRNAME") diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -517,7 +517,19 @@ assert x.format('.!') == ( '-!....!!..!!..!.!!.!......!...!...!!!........!') assert x.format('abcdefghijkl', '<<', '>>') == '-<>' - + + def test_format_caching(self): + big = rbigint.fromlong(2 ** 1000) + res1 = big.str() + oldpow = rbigint.__dict__['pow'] + rbigint.pow = None + # make sure pow is not used the second time + try: + res2 = big.str() + assert res2 == res1 + finally: + rbigint.pow = oldpow + def test_overzelous_assertion(self): a = rbigint.fromlong(-1<<10000) b = rbigint.fromlong(-1<<3000) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -1,25 +1,20 @@ import types -import sys -from rpython.tool.pairtype import pairtype, pair -from rpython.annotator import model as annmodel -from rpython.annotator import description -from rpython.flowspace.model import Constant, Variable -from rpython.rtyper.lltypesystem.lltype import \ - typeOf, Void, ForwardReference, Struct, Bool, Char, \ - Ptr, malloc, nullptr, Array, Signed, FuncType -from rpython.rtyper.rmodel import Repr, TyperError, inputconst, inputdesc -from rpython.rtyper.rpbc import samesig,\ - commonbase, allattributenames, adjust_shape, \ - AbstractClassesPBCRepr, AbstractMethodsPBCRepr, OverriddenFunctionPBCRepr, \ - AbstractMultipleFrozenPBCRepr, MethodOfFrozenPBCRepr, \ - AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, \ - SingleFrozenPBCRepr, none_frozen_pbc_repr, get_concrete_calltable + +from rpython.annotator import description, model as annmodel +from rpython.rlib.debug import ll_assert +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper import callparse from 
rpython.rtyper.lltypesystem import rclass, llmemory -from rpython.tool.sourcetools import has_varargs -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.debug import ll_assert +from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, + Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed) +from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rpbc import (AbstractClassesPBCRepr, AbstractMethodsPBCRepr, + OverriddenFunctionPBCRepr, AbstractMultipleFrozenPBCRepr, + AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, + SingleFrozenPBCRepr, MethodOfFrozenPBCRepr, none_frozen_pbc_repr, + get_concrete_calltable) +from rpython.tool.pairtype import pairtype -from rpython.rtyper import callparse def rtype_is_None(robj1, rnone2, hop, pos=0): if isinstance(robj1.lowleveltype, Ptr): @@ -41,6 +36,7 @@ else: raise TyperError('rtype_is_None of %r' % (robj1)) + # ____________________________________________________________ class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): @@ -67,7 +63,7 @@ mangled_name, r_value = self.fieldmap[attr] cmangledname = inputconst(Void, mangled_name) return llops.genop('getfield', [vpbc, cmangledname], - resulttype = r_value) + resulttype=r_value) class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): @@ -86,6 +82,7 @@ def null_instance(self): return llmemory.Address._defl() + class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr), pairtype(MultipleUnrelatedFrozenPBCRepr, @@ -100,11 +97,13 @@ vlist = hop.inputargs(r, r) return hop.genop('adr_eq', vlist, resulttype=Bool) + class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleUnrelatedFrozenPBCRepr)): def convert_from_to((robj1, robj2), v, llops): return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) + # ____________________________________________________________ class FunctionsPBCRepr(AbstractFunctionsPBCRepr): @@ -123,6 +122,7 @@ def get_specfunc_row(self, llop, v, c_rowname, resulttype): return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) + class SmallFunctionSetPBCRepr(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper @@ -252,15 +252,6 @@ return hop.genop('char_ne', [v1, inputconst(Char, '\000')], resulttype=Bool) -## def rtype_simple_call(self, hop): -## v_index = hop.inputarg(self, arg=0) -## v_ptr = hop.llops.convertvar(v_index, self, self.pointer_repr) -## hop2 = hop.copy() -## hop2.args_r[0] = self.pointer_repr -## hop2.args_v[0] = v_ptr -## return hop2.dispatch() - -## rtype_call_args = rtype_simple_call class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): @@ -273,6 +264,7 @@ return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], resulttype=r_ptr.lowleveltype) + def compression_function(r_set): if r_set._compression_function is None: table = [] @@ -280,6 +272,7 @@ table.append((chr(i), p)) last_c, last_p = table[-1] unroll_table = unrolling_iterable(table[:-1]) + def ll_compress(fnptr): for c, p in unroll_table: if fnptr == p: @@ -290,6 +283,7 @@ r_set._compression_function = ll_compress return r_set._compression_function + class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_ptr, r_set), v, llops): if r_ptr.lowleveltype is Void: @@ -299,6 +293,7 @@ ll_compress = compression_function(r_set) return llops.gendirectcall(ll_compress, v) + def conversion_table(r_from, r_to): if r_to in 
r_from._conversion_tables: return r_from._conversion_tables[r_to] @@ -320,7 +315,6 @@ r_from._conversion_tables[r_to] = r return r -## myf = open('convlog.txt', 'w') class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_from, r_to), v, llops): @@ -343,6 +337,7 @@ else: From noreply at buildbot.pypy.org Sat Jul 20 02:21:16 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 02:21:16 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix greenlet exception handling: this has to normalize 3 item exception args Message-ID: <20130720002116.3A56C1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65492:6a0b8e7eaf5d Date: 2013-07-19 17:18 -0700 http://bitbucket.org/pypy/pypy/changeset/6a0b8e7eaf5d/ Log: fix greenlet exception handling: this has to normalize 3 item exception args without py2's 2 or 3 expression raise statement, so do it w/ a stupid new __pypy__ helper as the normalization rules are pretty annoying diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,4 +1,5 @@ import sys +import __pypy__ import _continuation __version__ = "0.4.0" @@ -79,7 +80,7 @@ # convert a "raise GreenletExit" into "return GreenletExit" if methodname == 'throw': try: - raise baseargs[0](baseargs[1]) + raise __pypy__.normalize_exc(baseargs[0], baseargs[1]) except GreenletExit as e: methodname = 'switch' baseargs = (((e,), {}),) @@ -156,7 +157,7 @@ def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: - raise value.with_traceback(tb) + raise __pypy__.normalize_exc(exc, value, tb) except GreenletExit as e: res = e finally: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -74,6 +74,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'normalize_exc' : 'interp_magic.normalize_exc', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,5 @@ from pypy.interpreter.error import OperationError, wrap_oserror -from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -108,3 +108,9 @@ @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + + at unwrap_spec(w_value=WrappedDefault(None), w_tb=WrappedDefault(None)) +def normalize_exc(space, w_type, w_value=None, w_tb=None): + operr = OperationError(w_type, w_value, w_tb) + operr.normalize_exception(space) + return operr.get_w_value(space) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -93,6 +93,25 @@ o = 5 raises(TypeError, list_strategy, 5) + def test_normalize_exc(self): + from __pypy__ import normalize_exc + e = normalize_exc(TypeError) + assert isinstance(e, TypeError) + e = normalize_exc(TypeError, 'foo') + assert isinstance(e, TypeError) + assert str(e) == 'foo' + e = normalize_exc(TypeError('doh')) + assert isinstance(e, TypeError) + assert str(e) == 'doh' 
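Without py2's two- and three-expression raise statement, the (type, value, traceback) triple really does have to be normalized by hand, which is what the new __pypy__.normalize_exc helper is for. A rough pure-Python sketch of the common cases it must cover, matching the cases exercised by the test above and below; the real normalize_exception rules handle more corner cases, which is what the log message calls annoying:

    def normalize_exc_sketch(tp, value=None, tb=None):
        # hypothetical stand-in for illustration only
        if isinstance(tp, BaseException):       # already an instance, e.g. TypeError('doh')
            exc = tp
        elif isinstance(value, BaseException):  # (type, instance) pair
            exc = value
        elif value is None:                     # bare type
            exc = tp()
        else:                                   # type plus argument
            exc = tp(value)
        if tb is not None:
            exc = exc.with_traceback(tb)
        return exc

    assert str(normalize_exc_sketch(TypeError, 'foo')) == 'foo'
    assert isinstance(normalize_exc_sketch(TypeError('doh')), TypeError)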
+ + try: + 1 / 0 + except ZeroDivisionError as e: + tb = e.__traceback__ + e = normalize_exc(TypeError, None, tb) + assert isinstance(e, TypeError) + assert e.__traceback__ == tb + class AppTestJitFeatures(object): spaceconfig = {"translation.jit": True} From noreply at buildbot.pypy.org Sat Jul 20 02:21:17 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 02:21:17 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20130720002117.80B681C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65493:bce5675fd138 Date: 2013-07-19 17:19 -0700 http://bitbucket.org/pypy/pypy/changeset/bce5675fd138/ Log: adapt to py3 diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -37,5 +37,5 @@ if i < 2: expected_type = float else: - expected_type = (int, long) + expected_type = int assert isinstance(x[i], expected_type) diff --git a/pypy/module/test_lib_pypy/test_structseq.py b/pypy/module/test_lib_pypy/test_structseq.py --- a/pypy/module/test_lib_pypy/test_structseq.py +++ b/pypy/module/test_lib_pypy/test_structseq.py @@ -10,30 +10,32 @@ def w_get_mydata(self): _structseq = self._structseq - ssfield = _structseq.structseqfield - class mydata: - __metaclass__ = _structseq.structseqtype - - st_mode = ssfield(0, "protection bits") - st_ino = ssfield(1) - st_dev = ssfield(2) - st_nlink = ssfield(3) - st_uid = ssfield(4) - st_gid = ssfield(5) - st_size = ssfield(6) - _st_atime_as_int = ssfield(7) - _st_mtime_as_int = ssfield(8) - _st_ctime_as_int = ssfield(9) - # skip to higher numbers for fields not part of the sequence. - # the numbers are only used to ordering - st_rdev = ssfield(50, "device type (if inode device)") - st_atime = ssfield(57, - default=lambda self: self._st_atime_as_int) - st_mtime = ssfield(58, - default=lambda self: self._st_mtime_as_int) - st_ctime = ssfield(59, - default=lambda self: self._st_ctime_as_int) - return mydata + ns = dict(_structseq=_structseq, + ssfield=_structseq.structseqfield) + # need to exec since it uses the py3k-only metaclass syntax + exec("""\ +class mydata(metaclass=_structseq.structseqtype): + st_mode = ssfield(0, "protection bits") + st_ino = ssfield(1) + st_dev = ssfield(2) + st_nlink = ssfield(3) + st_uid = ssfield(4) + st_gid = ssfield(5) + st_size = ssfield(6) + _st_atime_as_int = ssfield(7) + _st_mtime_as_int = ssfield(8) + _st_ctime_as_int = ssfield(9) + # skip to higher numbers for fields not part of the sequence. + # the numbers are only used to ordering + st_rdev = ssfield(50, "device type (if inode device)") + st_atime = ssfield(57, + default=lambda self: self._st_atime_as_int) + st_mtime = ssfield(58, + default=lambda self: self._st_mtime_as_int) + st_ctime = ssfield(59, + default=lambda self: self._st_ctime_as_int) +""", ns) + return ns['mydata'] def test_class(self): mydata = self.get_mydata() @@ -52,7 +54,7 @@ assert x.st_ctime == 109 # copied by the default=lambda... 
assert x.st_rdev == 110 assert len(x) == 10 - assert list(x) == range(100, 110) + assert list(x) == list(range(100, 110)) assert x + (5,) == tuple(range(100, 110)) + (5,) assert x[4:12:2] == (104, 106, 108) assert 104 in x @@ -72,7 +74,7 @@ def test_compare_like_tuple(self): mydata = self.get_mydata() x = mydata(range(100, 111)) - y = mydata(range(100, 110) + [555]) + y = mydata(list(range(100, 110)) + [555]) assert x == tuple(range(100, 110)) assert x == y # blame CPython assert hash(x) == hash(y) == hash(tuple(range(100, 110))) From noreply at buildbot.pypy.org Sat Jul 20 03:22:03 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:22:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20130720012203.2DB331C0E1B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65494:e552c1932247 Date: 2013-07-19 17:49 -0700 http://bitbucket.org/pypy/pypy/changeset/e552c1932247/ Log: 2to3 diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -64,8 +64,7 @@ self.__dict__['st_ctime'] = self[9] -class statvfs_result: - __metaclass__ = structseqtype +class statvfs_result(metaclass=structseqtype): name = osname + ".statvfs_result" From noreply at buildbot.pypy.org Sat Jul 20 03:22:04 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:22:04 +0200 (CEST) Subject: [pypy-commit] pypy default: this eq_w doesn't seem necessary any longer and is broken (doesn't try is_w Message-ID: <20130720012204.578571C0E1B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65495:8462ed51bff9 Date: 2013-07-19 18:15 -0700 http://bitbucket.org/pypy/pypy/changeset/8462ed51bff9/ Log: this eq_w doesn't seem necessary any longer and is broken (doesn't try is_w first) diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -277,7 +277,3 @@ except py.test.raises.Exception, e: e.tbindex = getattr(e, 'tbindex', -1) - 1 raise - -def eq_w(space, w_obj1, w_obj2): - """ return interp-level boolean of eq(w_obj1, w_obj2). 
""" - return space.is_true(space.eq(w_obj1, w_obj2)) diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py --- a/pypy/tool/pytest/objspace.py +++ b/pypy/tool/pytest/objspace.py @@ -37,7 +37,6 @@ space.setitem(space.builtin.w_dict, space.wrap('skip'), space.wrap(appsupport.app_skip)) space.raises_w = appsupport.raises_w.__get__(space) - space.eq_w = appsupport.eq_w.__get__(space) return space From noreply at buildbot.pypy.org Sat Jul 20 03:22:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:22:05 +0200 (CEST) Subject: [pypy-commit] pypy default: convert to formal app-level style tests Message-ID: <20130720012205.8AE411C0E1B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65496:a26b87569de1 Date: 2013-07-19 18:15 -0700 http://bitbucket.org/pypy/pypy/changeset/a26b87569de1/ Log: convert to formal app-level style tests diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -2,15 +2,20 @@ Extra tests for the pure Python PyPy _collections module (not used in normal PyPy's) """ +from pypy.module.test_lib_pypy.support import import_lib_pypy -from __future__ import absolute_import -from lib_pypy import _collections as collections -import py -class TestDeque: +class AppTestDeque: + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def setup_method(self, method): - self.n = 10 - self.d = collections.deque(range(self.n)) + space = self.space + n = 10 + self.w_n = space.wrap(n) + w_deque = space.getattr(self.w_collections, space.wrap('deque')) + self.w_d = space.call_function(w_deque, space.wrap(range(n))) def test_deque(self): assert len(self.d) == self.n @@ -22,24 +27,25 @@ def test_deque_iter(self): it = iter(self.d) - py.test.raises(TypeError, len, it) + raises(TypeError, len, it) assert it.next() == 0 self.d.pop() - py.test.raises(RuntimeError, it.next) + raises(RuntimeError, it.next) def test_deque_reversed(self): it = reversed(self.d) - py.test.raises(TypeError, len, it) + raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 self.d.pop() - py.test.raises(RuntimeError, it.next) + raises(RuntimeError, it.next) def test_deque_remove(self): d = self.d - py.test.raises(ValueError, d.remove, "foobar") + raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): + collections = self.collections # Handle evil mutator class MutateCmp: def __init__(self, deque, result): @@ -52,24 +58,33 @@ for match in (True, False): d = collections.deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - py.test.raises(IndexError, d.remove, 'c') + raises(IndexError, d.remove, 'c') assert len(d) == 0 -class TestDequeExtra: +class AppTestDequeExtra: + + spaceconfig = dict(usemodules=('binascii', 'struct',)) + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_remove_empty(self): + collections = self.collections d = collections.deque([]) - py.test.raises(ValueError, d.remove, 1) + raises(ValueError, d.remove, 1) def test_remove_mutating(self): + collections = self.collections class MutatingCmp(object): def __eq__(self, other): d.clear() return True d = collections.deque([MutatingCmp()]) - py.test.raises(IndexError, d.remove, 1) + raises(IndexError, d.remove, 1) def test_remove_failing(self): + collections = self.collections class FailingCmp(object): def 
__eq__(self, other): assert False @@ -77,10 +92,11 @@ f = FailingCmp() d = collections.deque([1, 2, 3, f, 4, 5]) d.remove(3) - py.test.raises(AssertionError, d.remove, 4) + raises(AssertionError, d.remove, 4) assert d == collections.deque([1, 2, f, 4, 5]) def test_maxlen(self): + collections = self.collections d = collections.deque([], 3) d.append(1); d.append(2); d.append(3); d.append(4) assert list(d) == [2, 3, 4] @@ -95,11 +111,13 @@ assert repr(d3) == "deque([2, 3, 4], maxlen=3)" def test_count(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) assert d.count(2) == 3 assert d.count(4) == 0 def test_reverse(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) d.reverse() assert list(d) == [2, 3, 2, 2, 1] @@ -109,6 +127,7 @@ assert list(d) == range(99, -1, -1) def test_subclass_with_kwargs(self): + collections = self.collections class SubclassWithKwargs(collections.deque): def __init__(self, newarg=1): collections.deque.__init__(self) @@ -116,11 +135,13 @@ # SF bug #1486663 -- this used to erroneously raise a TypeError SubclassWithKwargs(newarg=1) -def foobar(): - return list +class AppTestDefaultDict: -class TestDefaultDict: + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_basic(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory is None d1.default_factory = list @@ -148,20 +169,23 @@ assert 12 not in d2.keys() d2.default_factory = None assert d2.default_factory == None - py.test.raises(KeyError, d2.__getitem__, 15) - py.test.raises(TypeError, collections.defaultdict, 1) + raises(KeyError, d2.__getitem__, 15) + raises(TypeError, collections.defaultdict, 1) def test_constructor(self): + collections = self.collections assert collections.defaultdict(None) == {} assert collections.defaultdict(None, {1: 2}) == {1: 2} def test_missing(self): + collections = self.collections d1 = collections.defaultdict() - py.test.raises(KeyError, d1.__missing__, 42) + raises(KeyError, d1.__missing__, 42) d1.default_factory = list assert d1.__missing__(42) == [] def test_repr(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory == None assert repr(d1) == "defaultdict(None, {})" @@ -181,6 +205,7 @@ assert repr(d4) == "defaultdict(%s, {14: defaultdict(None, {})})" % repr(int) def test_recursive_repr(self): + collections = self.collections # Issue2045: stack overflow when default_factory is a bound method class sub(collections.defaultdict): def __init__(self): @@ -192,6 +217,7 @@ "defaultdict( Author: Philip Jenvey Branch: py3k Changeset: r65497:51a3db8d788f Date: 2013-07-19 18:19 -0700 http://bitbucket.org/pypy/pypy/changeset/51a3db8d788f/ Log: merge default diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -2,15 +2,20 @@ Extra tests for the pure Python PyPy _collections module (not used in normal PyPy's) """ +from pypy.module.test_lib_pypy.support import import_lib_pypy -from __future__ import absolute_import -from lib_pypy import _collections as collections -import py -class TestDeque: +class AppTestDeque: + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def setup_method(self, method): - self.n = 10 - self.d = collections.deque(range(self.n)) + space = self.space + n = 10 + self.w_n = space.wrap(n) + w_deque = 
space.getattr(self.w_collections, space.wrap('deque')) + self.w_d = space.call_function(w_deque, space.wrap(range(n))) def test_deque(self): assert len(self.d) == self.n @@ -22,24 +27,25 @@ def test_deque_iter(self): it = iter(self.d) - py.test.raises(TypeError, len, it) + raises(TypeError, len, it) assert it.next() == 0 self.d.pop() - py.test.raises(RuntimeError, it.next) + raises(RuntimeError, it.next) def test_deque_reversed(self): it = reversed(self.d) - py.test.raises(TypeError, len, it) + raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 self.d.pop() - py.test.raises(RuntimeError, it.next) + raises(RuntimeError, it.next) def test_deque_remove(self): d = self.d - py.test.raises(ValueError, d.remove, "foobar") + raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): + collections = self.collections # Handle evil mutator class MutateCmp: def __init__(self, deque, result): @@ -52,24 +58,33 @@ for match in (True, False): d = collections.deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - py.test.raises(IndexError, d.remove, 'c') + raises(IndexError, d.remove, 'c') assert len(d) == 0 -class TestDequeExtra: +class AppTestDequeExtra: + + spaceconfig = dict(usemodules=('binascii', 'struct',)) + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_remove_empty(self): + collections = self.collections d = collections.deque([]) - py.test.raises(ValueError, d.remove, 1) + raises(ValueError, d.remove, 1) def test_remove_mutating(self): + collections = self.collections class MutatingCmp(object): def __eq__(self, other): d.clear() return True d = collections.deque([MutatingCmp()]) - py.test.raises(IndexError, d.remove, 1) + raises(IndexError, d.remove, 1) def test_remove_failing(self): + collections = self.collections class FailingCmp(object): def __eq__(self, other): assert False @@ -77,10 +92,11 @@ f = FailingCmp() d = collections.deque([1, 2, 3, f, 4, 5]) d.remove(3) - py.test.raises(AssertionError, d.remove, 4) + raises(AssertionError, d.remove, 4) assert d == collections.deque([1, 2, f, 4, 5]) def test_maxlen(self): + collections = self.collections d = collections.deque([], 3) d.append(1); d.append(2); d.append(3); d.append(4) assert list(d) == [2, 3, 4] @@ -95,11 +111,13 @@ assert repr(d3) == "deque([2, 3, 4], maxlen=3)" def test_count(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) assert d.count(2) == 3 assert d.count(4) == 0 def test_reverse(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) d.reverse() assert list(d) == [2, 3, 2, 2, 1] @@ -109,6 +127,7 @@ assert list(d) == range(99, -1, -1) def test_subclass_with_kwargs(self): + collections = self.collections class SubclassWithKwargs(collections.deque): def __init__(self, newarg=1): collections.deque.__init__(self) @@ -116,11 +135,13 @@ # SF bug #1486663 -- this used to erroneously raise a TypeError SubclassWithKwargs(newarg=1) -def foobar(): - return list +class AppTestDefaultDict: -class TestDefaultDict: + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_basic(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory is None d1.default_factory = list @@ -148,20 +169,23 @@ assert 12 not in d2.keys() d2.default_factory = None assert d2.default_factory == None - py.test.raises(KeyError, d2.__getitem__, 15) - py.test.raises(TypeError, collections.defaultdict, 1) + raises(KeyError, d2.__getitem__, 
15) + raises(TypeError, collections.defaultdict, 1) def test_constructor(self): + collections = self.collections assert collections.defaultdict(None) == {} assert collections.defaultdict(None, {1: 2}) == {1: 2} def test_missing(self): + collections = self.collections d1 = collections.defaultdict() - py.test.raises(KeyError, d1.__missing__, 42) + raises(KeyError, d1.__missing__, 42) d1.default_factory = list assert d1.__missing__(42) == [] def test_repr(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory == None assert repr(d1) == "defaultdict(None, {})" @@ -181,6 +205,7 @@ assert repr(d4) == "defaultdict(%s, {14: defaultdict(None, {})})" % repr(int) def test_recursive_repr(self): + collections = self.collections # Issue2045: stack overflow when default_factory is a bound method class sub(collections.defaultdict): def __init__(self): @@ -192,6 +217,7 @@ "defaultdict( Author: Philip Jenvey Branch: py3k Changeset: r65498:018c042a46f0 Date: 2013-07-19 18:20 -0700 http://bitbucket.org/pypy/pypy/changeset/018c042a46f0/ Log: 2to3 diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -28,17 +28,17 @@ def test_deque_iter(self): it = iter(self.d) raises(TypeError, len, it) - assert it.next() == 0 + assert next(it) == 0 self.d.pop() - raises(RuntimeError, it.next) + raises(RuntimeError, next, it) def test_deque_reversed(self): it = reversed(self.d) raises(TypeError, len, it) - assert it.next() == self.n-1 - assert it.next() == self.n-2 + assert next(it) == self.n-1 + assert next(it) == self.n-2 self.d.pop() - raises(RuntimeError, it.next) + raises(RuntimeError, next, it) def test_deque_remove(self): d = self.d @@ -124,7 +124,7 @@ d = collections.deque(range(100)) d.reverse() - assert list(d) == range(99, -1, -1) + assert list(d) == list(range(99, -1, -1)) def test_subclass_with_kwargs(self): collections = self.collections @@ -194,7 +194,7 @@ d2 = collections.defaultdict(int) assert d2.default_factory == int d2[12] = 42 - assert repr(d2) == "defaultdict(, {12: 42})" + assert repr(d2) == "defaultdict(, {12: 42})" def foo(): return 43 d3 = collections.defaultdict(foo) assert d3.default_factory is foo From noreply at buildbot.pypy.org Sat Jul 20 03:39:58 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:39:58 +0200 (CEST) Subject: [pypy-commit] pypy default: slight tweaks to help this work under py3k appdirect mode Message-ID: <20130720013958.9B40D1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65499:0c65312f0027 Date: 2013-07-19 18:33 -0700 http://bitbucket.org/pypy/pypy/changeset/0c65312f0027/ Log: slight tweaks to help this work under py3k appdirect mode diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -8,40 +8,41 @@ class AppTestDeque: def setup_class(cls): - cls.w_collections = import_lib_pypy(cls.space, '_collections') + space = cls.space + cls.w_collections = import_lib_pypy(space, '_collections') + cls.w_n = space.wrap(10) - def setup_method(self, method): - space = self.space - n = 10 - self.w_n = space.wrap(n) - w_deque = space.getattr(self.w_collections, space.wrap('deque')) - self.w_d = space.call_function(w_deque, space.wrap(range(n))) + def w_get_deque(self): + return 
self.collections.deque(range(self.n)) def test_deque(self): - assert len(self.d) == self.n + d = self.get_deque() + assert len(d) == self.n for i in range(self.n): - assert i == self.d[i] + assert i == d[i] for i in range(self.n-1, -1, -1): - assert self.d.pop() == i - assert len(self.d) == 0 + assert d.pop() == i + assert len(d) == 0 def test_deque_iter(self): - it = iter(self.d) + d = self.get_deque() + it = iter(d) raises(TypeError, len, it) assert it.next() == 0 - self.d.pop() + d.pop() raises(RuntimeError, it.next) def test_deque_reversed(self): - it = reversed(self.d) + d = self.get_deque() + it = reversed(d) raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 - self.d.pop() + d.pop() raises(RuntimeError, it.next) def test_deque_remove(self): - d = self.d + d = self.get_deque() raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): From noreply at buildbot.pypy.org Sat Jul 20 03:39:59 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:39:59 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130720013959.E6BBA1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65500:fcb802e5397a Date: 2013-07-19 18:35 -0700 http://bitbucket.org/pypy/pypy/changeset/fcb802e5397a/ Log: merge default diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -8,40 +8,41 @@ class AppTestDeque: def setup_class(cls): - cls.w_collections = import_lib_pypy(cls.space, '_collections') + space = cls.space + cls.w_collections = import_lib_pypy(space, '_collections') + cls.w_n = space.wrap(10) - def setup_method(self, method): - space = self.space - n = 10 - self.w_n = space.wrap(n) - w_deque = space.getattr(self.w_collections, space.wrap('deque')) - self.w_d = space.call_function(w_deque, space.wrap(range(n))) + def w_get_deque(self): + return self.collections.deque(range(self.n)) def test_deque(self): - assert len(self.d) == self.n + d = self.get_deque() + assert len(d) == self.n for i in range(self.n): - assert i == self.d[i] + assert i == d[i] for i in range(self.n-1, -1, -1): - assert self.d.pop() == i - assert len(self.d) == 0 + assert d.pop() == i + assert len(d) == 0 def test_deque_iter(self): - it = iter(self.d) + d = self.get_deque() + it = iter(d) raises(TypeError, len, it) assert next(it) == 0 - self.d.pop() + d.pop() raises(RuntimeError, next, it) def test_deque_reversed(self): - it = reversed(self.d) + d = self.get_deque() + it = reversed(d) raises(TypeError, len, it) assert next(it) == self.n-1 assert next(it) == self.n-2 - self.d.pop() + d.pop() raises(RuntimeError, next, it) def test_deque_remove(self): - d = self.d + d = self.get_deque() raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): From noreply at buildbot.pypy.org Sat Jul 20 03:40:01 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:40:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: add the pypyroot to the appdirect sys.path so it can import from lib_pypy Message-ID: <20130720014001.335191C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65501:fd721a7dc616 Date: 2013-07-19 18:38 -0700 http://bitbucket.org/pypy/pypy/changeset/fd721a7dc616/ Log: add the pypyroot to the appdirect sys.path so it can import from lib_pypy diff --git a/pypy/tool/pytest/apptest.py 
b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -7,6 +7,7 @@ # ...unless the -A option ('runappdirect') is passed. import py +import os import sys, textwrap, types from pypy.interpreter.gateway import app2interp_temp from pypy.interpreter.error import OperationError @@ -15,9 +16,11 @@ from pypy.tool.pytest import appsupport from pypy.tool.pytest.objspace import gettestobjspace from rpython.tool.udir import udir -from pypy.conftest import PyPyClassCollector +from pypy.conftest import PyPyClassCollector, pypydir from inspect import getmro +pypyroot = os.path.dirname(pypydir) + RENAMED_USEMODULES = dict( _winreg='winreg', exceptions='builtins', @@ -69,6 +72,7 @@ helpers = r"""# -*- encoding: utf-8 -*- if 1: import sys + sys.path.append('%s') %s def skip(message): print(message) @@ -161,7 +165,7 @@ else: target_name = target_.__name__ with pyfile.open('w') as f: - f.write(helpers % check_usemodules) + f.write(helpers % (pypyroot, check_usemodules)) f.write('\n'.join(defs)) f.write('def %s():\n' % target_name) f.write('\n'.join(source)) From noreply at buildbot.pypy.org Sat Jul 20 03:49:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 03:49:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: this tests specifically our own code module modifications, so skip it under Message-ID: <20130720014945.15CF01C14B6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65502:897dfe5eadb6 Date: 2013-07-19 18:48 -0700 http://bitbucket.org/pypy/pypy/changeset/897dfe5eadb6/ Log: this tests specifically our own code module modifications, so skip it under appdirect diff --git a/pypy/module/test_lib_pypy/test_code_module.py b/pypy/module/test_lib_pypy/test_code_module.py --- a/pypy/module/test_lib_pypy/test_code_module.py +++ b/pypy/module/test_lib_pypy/test_code_module.py @@ -1,5 +1,12 @@ +import py + + class AppTestCodeModule: + def setup_class(cls): + if cls.runappdirect: + py.test.skip("CPython's code module doesn't yet support this") + def w_get_interp(self): import code import io From noreply at buildbot.pypy.org Sat Jul 20 05:09:22 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:22 +0200 (CEST) Subject: [pypy-commit] pypy default: oops Message-ID: <20130720030922.1E5B71C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65503:4121d8d7d855 Date: 2013-07-19 19:13 -0700 http://bitbucket.org/pypy/pypy/changeset/4121d8d7d855/ Log: oops diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py --- a/pypy/module/test_lib_pypy/support.py +++ b/pypy/module/test_lib_pypy/support.py @@ -24,7 +24,7 @@ # overshadows it w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) except OperationError as e: - if skipmsg is not None or not e.match(space, space.w_ImportError): + if skipmsg is None or not e.match(space, space.w_ImportError): raise py.test.skip('%s (%s))' % (skipmsg, str(e))) w_file = space.getattr(w_mod, space.wrap('__file__')) From noreply at buildbot.pypy.org Sat Jul 20 05:09:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130720030923.62B2A1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65504:92b54ee3d6f2 Date: 2013-07-19 19:14 -0700 http://bitbucket.org/pypy/pypy/changeset/92b54ee3d6f2/ Log: merge default diff --git a/pypy/module/test_lib_pypy/support.py 
b/pypy/module/test_lib_pypy/support.py --- a/pypy/module/test_lib_pypy/support.py +++ b/pypy/module/test_lib_pypy/support.py @@ -24,7 +24,7 @@ # overshadows it w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) except OperationError as e: - if skipmsg is not None or not e.match(space, space.w_ImportError): + if skipmsg is None or not e.match(space, space.w_ImportError): raise py.test.skip('%s (%s))' % (skipmsg, str(e))) w_file = space.getattr(w_mod, space.wrap('__file__')) From noreply at buildbot.pypy.org Sat Jul 20 05:09:24 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:24 +0200 (CEST) Subject: [pypy-commit] pypy default: prefer the py3k friendly sys.maxsize Message-ID: <20130720030924.9D4391C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65505:a4c42ef3992f Date: 2013-07-19 19:51 -0700 http://bitbucket.org/pypy/pypy/changeset/a4c42ef3992f/ Log: prefer the py3k friendly sys.maxsize diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -5,41 +5,41 @@ def w_intmask(self, n): import sys - n &= (sys.maxint*2+1) - if n > sys.maxint: - n -= 2*(sys.maxint+1) + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) return int(n) def test_intmask(self): import sys - assert self.intmask(sys.maxint) == sys.maxint - assert self.intmask(sys.maxint+1) == -sys.maxint-1 - assert self.intmask(-sys.maxint-2) == sys.maxint + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize N = 2 ** 128 - assert self.intmask(N+sys.maxint) == sys.maxint - assert self.intmask(N+sys.maxint+1) == -sys.maxint-1 - assert self.intmask(N-sys.maxint-2) == sys.maxint + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize def test_int_add(self): import sys from __pypy__ import intop assert intop.int_add(40, 2) == 42 - assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 - assert intop.int_add(-2, -sys.maxint) == sys.maxint + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize def test_int_sub(self): import sys from __pypy__ import intop assert intop.int_sub(40, -2) == 42 - assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 - assert intop.int_sub(-2, sys.maxint) == sys.maxint + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) == sys.maxsize def test_int_mul(self): import sys from __pypy__ import intop assert intop.int_mul(40, -2) == -80 - assert intop.int_mul(-sys.maxint, -sys.maxint) == ( - self.intmask(sys.maxint ** 2)) + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) def test_int_floordiv(self): import sys @@ -48,8 +48,8 @@ assert intop.int_floordiv(41, -3) == -13 assert intop.int_floordiv(-41, 3) == -13 assert intop.int_floordiv(-41, -3) == 13 - assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint - assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize def test_int_mod(self): import sys @@ -58,26 +58,26 @@ assert intop.int_mod(41, -3) == 2 assert intop.int_mod(-41, 3) == -2 assert intop.int_mod(-41, -3) == -2 - assert 
intop.int_mod(-sys.maxint, -1) == 0 - assert intop.int_mod(sys.maxint, -1) == 0 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 def test_int_lshift(self): import sys from __pypy__ import intop - if sys.maxint == 2**31-1: + if sys.maxsize == 2**31-1: bits = 32 else: bits = 64 assert intop.int_lshift(42, 3) == 42 << 3 assert intop.int_lshift(0, 3333) == 0 assert intop.int_lshift(1, bits-2) == 1 << (bits-2) - assert intop.int_lshift(1, bits-1) == -sys.maxint-1 == (-1) << (bits-1) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) - assert intop.int_lshift(-1, bits-1) == -sys.maxint-1 - assert intop.int_lshift(sys.maxint // 3, 2) == ( - self.intmask((sys.maxint // 3) << 2)) - assert intop.int_lshift(-sys.maxint // 3, 2) == ( - self.intmask((-sys.maxint // 3) << 2)) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) def test_int_rshift(self): from __pypy__ import intop @@ -90,7 +90,7 @@ def test_uint_rshift(self): import sys from __pypy__ import intop - if sys.maxint == 2**31-1: + if sys.maxsize == 2**31-1: bits = 32 else: bits = 64 @@ -99,6 +99,6 @@ assert intop.uint_rshift(-42, 3) == (N-42) >> 3 assert intop.uint_rshift(0, 3333) == 0 assert intop.uint_rshift(-1, 0) == -1 - assert intop.uint_rshift(-1, 1) == sys.maxint + assert intop.uint_rshift(-1, 1) == sys.maxsize assert intop.uint_rshift(-1, bits-2) == 3 assert intop.uint_rshift(-1, bits-1) == 1 From noreply at buildbot.pypy.org Sat Jul 20 05:09:25 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:25 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify Message-ID: <20130720030925.C8E6F1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65506:a401eb4866e1 Date: 2013-07-19 19:58 -0700 http://bitbucket.org/pypy/pypy/changeset/a401eb4866e1/ Log: simplify diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py --- a/pypy/module/test_lib_pypy/support.py +++ b/pypy/module/test_lib_pypy/support.py @@ -19,16 +19,15 @@ return getattr(mod, name) try: - # Assume app-level import finds it from the right place (we - # assert so afterwards). It should as long as a builtin module - # overshadows it - w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) + # app-level import should find it from the right place (we + # assert so afterwards) as long as a builtin module doesn't + # overshadow it + failed = ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" % name) + importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + "return %s" % (name, name, failed, name)) + return space.appexec([], importline) except OperationError as e: if skipmsg is None or not e.match(space, space.w_ImportError): raise py.test.skip('%s (%s))' % (skipmsg, str(e))) - w_file = space.getattr(w_mod, space.wrap('__file__')) - assert space.is_true(space.contains(w_file, space.wrap('lib_pypy'))), \ - ("%s didn't import from lib_pypy. Is a usemodules directive " - "overshadowing it?" 
% name) - return w_mod From noreply at buildbot.pypy.org Sat Jul 20 05:09:27 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130720030927.2797E1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65507:6ea19b5c2a9c Date: 2013-07-19 19:59 -0700 http://bitbucket.org/pypy/pypy/changeset/6ea19b5c2a9c/ Log: merge default diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py --- a/pypy/module/__pypy__/test/test_intop.py +++ b/pypy/module/__pypy__/test/test_intop.py @@ -5,41 +5,41 @@ def w_intmask(self, n): import sys - n &= (sys.maxint*2+1) - if n > sys.maxint: - n -= 2*(sys.maxint+1) + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) return int(n) def test_intmask(self): import sys - assert self.intmask(sys.maxint) == sys.maxint - assert self.intmask(sys.maxint+1) == -sys.maxint-1 - assert self.intmask(-sys.maxint-2) == sys.maxint + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize N = 2 ** 128 - assert self.intmask(N+sys.maxint) == sys.maxint - assert self.intmask(N+sys.maxint+1) == -sys.maxint-1 - assert self.intmask(N-sys.maxint-2) == sys.maxint + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize def test_int_add(self): import sys from __pypy__ import intop assert intop.int_add(40, 2) == 42 - assert intop.int_add(sys.maxint, 1) == -sys.maxint-1 - assert intop.int_add(-2, -sys.maxint) == sys.maxint + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize def test_int_sub(self): import sys from __pypy__ import intop assert intop.int_sub(40, -2) == 42 - assert intop.int_sub(sys.maxint, -1) == -sys.maxint-1 - assert intop.int_sub(-2, sys.maxint) == sys.maxint + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) == sys.maxsize def test_int_mul(self): import sys from __pypy__ import intop assert intop.int_mul(40, -2) == -80 - assert intop.int_mul(-sys.maxint, -sys.maxint) == ( - self.intmask(sys.maxint ** 2)) + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) def test_int_floordiv(self): import sys @@ -48,8 +48,8 @@ assert intop.int_floordiv(41, -3) == -13 assert intop.int_floordiv(-41, 3) == -13 assert intop.int_floordiv(-41, -3) == 13 - assert intop.int_floordiv(-sys.maxint, -1) == sys.maxint - assert intop.int_floordiv(sys.maxint, -1) == -sys.maxint + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize def test_int_mod(self): import sys @@ -58,26 +58,26 @@ assert intop.int_mod(41, -3) == 2 assert intop.int_mod(-41, 3) == -2 assert intop.int_mod(-41, -3) == -2 - assert intop.int_mod(-sys.maxint, -1) == 0 - assert intop.int_mod(sys.maxint, -1) == 0 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 def test_int_lshift(self): import sys from __pypy__ import intop - if sys.maxint == 2**31-1: + if sys.maxsize == 2**31-1: bits = 32 else: bits = 64 assert intop.int_lshift(42, 3) == 42 << 3 assert intop.int_lshift(0, 3333) == 0 assert intop.int_lshift(1, bits-2) == 1 << (bits-2) - assert intop.int_lshift(1, bits-1) == -sys.maxint-1 == (-1) << 
(bits-1) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) - assert intop.int_lshift(-1, bits-1) == -sys.maxint-1 - assert intop.int_lshift(sys.maxint // 3, 2) == ( - self.intmask((sys.maxint // 3) << 2)) - assert intop.int_lshift(-sys.maxint // 3, 2) == ( - self.intmask((-sys.maxint // 3) << 2)) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) def test_int_rshift(self): from __pypy__ import intop @@ -90,7 +90,7 @@ def test_uint_rshift(self): import sys from __pypy__ import intop - if sys.maxint == 2**31-1: + if sys.maxsize == 2**31-1: bits = 32 else: bits = 64 @@ -99,6 +99,6 @@ assert intop.uint_rshift(-42, 3) == (N-42) >> 3 assert intop.uint_rshift(0, 3333) == 0 assert intop.uint_rshift(-1, 0) == -1 - assert intop.uint_rshift(-1, 1) == sys.maxint + assert intop.uint_rshift(-1, 1) == sys.maxsize assert intop.uint_rshift(-1, bits-2) == 3 assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py --- a/pypy/module/test_lib_pypy/support.py +++ b/pypy/module/test_lib_pypy/support.py @@ -19,16 +19,15 @@ return getattr(mod, name) try: - # Assume app-level import finds it from the right place (we - # assert so afterwards). It should as long as a builtin module - # overshadows it - w_mod = space.appexec([], "(): import %s; return %s" % (name, name)) + # app-level import should find it from the right place (we + # assert so afterwards) as long as a builtin module doesn't + # overshadow it + failed = ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" % name) + importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + "return %s" % (name, name, failed, name)) + return space.appexec([], importline) except OperationError as e: if skipmsg is None or not e.match(space, space.w_ImportError): raise py.test.skip('%s (%s))' % (skipmsg, str(e))) - w_file = space.getattr(w_mod, space.wrap('__file__')) - assert space.is_true(space.contains(w_file, space.wrap('lib_pypy'))), \ - ("%s didn't import from lib_pypy. Is a usemodules directive " - "overshadowing it?" % name) - return w_mod From noreply at buildbot.pypy.org Sat Jul 20 05:09:28 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:28 +0200 (CEST) Subject: [pypy-commit] pypy py3k: tweak special casing of lib_pypy imports for py3's appdirect Message-ID: <20130720030928.5717E1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65508:c1b28cfd5002 Date: 2013-07-19 20:04 -0700 http://bitbucket.org/pypy/pypy/changeset/c1b28cfd5002/ Log: tweak special casing of lib_pypy imports for py3's appdirect diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py --- a/pypy/module/test_lib_pypy/support.py +++ b/pypy/module/test_lib_pypy/support.py @@ -9,24 +9,17 @@ Raises a pytest Skip on ImportError if a skip message was specified. """ - if option.runappdirect: - try: - mod = __import__('lib_pypy.' 
+ name) - except ImportError as e: - if skipmsg is not None: - py.test.skip('%s (%s))' % (skipmsg, str(e))) - raise - return getattr(mod, name) - try: # app-level import should find it from the right place (we # assert so afterwards) as long as a builtin module doesn't # overshadow it failed = ("%s didn't import from lib_pypy. Is a usemodules directive " "overshadowing it?" % name) - importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + importline = ("import %s; assert 'lib_pypy' in %s.__file__, %r; " "return %s" % (name, name, failed, name)) - return space.appexec([], importline) + if option.runappdirect: + importline = "from lib_pypy " + importline + return space.appexec([], "(): " + importline) except OperationError as e: if skipmsg is None or not e.match(space, space.w_ImportError): raise From noreply at buildbot.pypy.org Sat Jul 20 05:09:29 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 05:09:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20130720030929.7CDA21C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65509:1f1e9a2a1ffc Date: 2013-07-19 20:08 -0700 http://bitbucket.org/pypy/pypy/changeset/1f1e9a2a1ffc/ Log: adapt to py3 diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -12,22 +12,22 @@ spaceconfig = dict(usemodules=('struct',)) def setup_class(cls): - cls.w__sha = import_lib_pypy(cls.space, '_sha') + cls.w__sha = import_lib_pypy(cls.space, '_sha1') def w_check(self, data, digest): - computed = self._sha.new(data).hexdigest() + computed = self._sha.sha1(data).hexdigest() assert computed == digest def test_case_1(self): - self.check("abc", + self.check(b"abc", "a9993e364706816aba3e25717850c26c9cd0d89d") def test_case_2(self): - self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", + self.check(b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", "84983e441c3bd26ebaae4aa1f95129e5e54670f1") def disabled_too_slow_test_case_3(self): - self.check("a" * 1000000, + self.check(b"a" * 1000000, "34aa973cd4c4daa4f61eeb2bdbad27316534016f") def test_attributes(self): @@ -35,5 +35,5 @@ assert _sha.digest_size == 20 assert _sha.digestsize == 20 assert _sha.blocksize == 1 - assert _sha.new().digest_size == 20 - assert _sha.new().digestsize == 20 + assert _sha.sha1().digest_size == 20 + assert _sha.sha1().digestsize == 20 From noreply at buildbot.pypy.org Sat Jul 20 09:16:51 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 20 Jul 2013 09:16:51 +0200 (CEST) Subject: [pypy-commit] cffi alex_gaynor/added-a-long-description-for-pypi-1374279083916: added a long description for pypi. Message-ID: <20130720071651.C38951C0130@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: alex_gaynor/added-a-long-description-for-pypi-1374279083916 Changeset: r1292:bb9b05e7384a Date: 2013-07-20 00:11 +0000 http://bitbucket.org/cffi/cffi/changeset/bb9b05e7384a/ Log: added a long description for pypi. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -81,6 +81,18 @@ setup( name='cffi', description='Foreign Function Interface for Python calling C code.', + long_description=""" +CFFI +==== + +Foreign Function Interface for Python calling C code. +Please see the `Documentation `_. 
+ +Contact +------- + +`Mailing list `_ + """, version='0.7', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Jul 20 09:16:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 09:16:52 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in alex_gaynor/cffi-1/alex_gaynor/added-a-long-description-for-pypi-1374279083916 (pull request #18) Message-ID: <20130720071652.E70E91C0130@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r1293:5da3dbba87a0 Date: 2013-07-20 09:16 +0200 http://bitbucket.org/cffi/cffi/changeset/5da3dbba87a0/ Log: Merged in alex_gaynor/cffi-1/alex_gaynor/added-a-long-description- for-pypi-1374279083916 (pull request #18) added a long description for pypi. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -81,6 +81,18 @@ setup( name='cffi', description='Foreign Function Interface for Python calling C code.', + long_description=""" +CFFI +==== + +Foreign Function Interface for Python calling C code. +Please see the `Documentation `_. + +Contact +------- + +`Mailing list `_ + """, version='0.7', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Jul 20 13:26:19 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 20 Jul 2013 13:26:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: commited the cog plugins for linux in order to allow a asmgcc bug report Message-ID: <20130720112619.1366C1C142B@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r508:3b7c62fe0b00 Date: 2013-07-20 13:19 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3b7c62fe0b00/ Log: commited the cog plugins for linux in order to allow a asmgcc bug report diff --git a/B2DPlugin b/B2DPlugin new file mode 100755 index 0000000000000000000000000000000000000000..73175dbc8985c3f332cd71b15ae423879607983f GIT binary patch [cut] diff --git a/BitBltPlugin b/BitBltPlugin new file mode 100755 index 0000000000000000000000000000000000000000..9656c514fe319697e7dd1e707caf7d8e27435a9c GIT binary patch [cut] From noreply at buildbot.pypy.org Sat Jul 20 14:03:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:03:28 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1552 resolved Message-ID: <20130720120328.CF9B81C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65510:30c281188d1c Date: 2013-07-20 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/30c281188d1c/ Log: issue1552 resolved Inherit GreenletExit from BaseException. 
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -6,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" From noreply at buildbot.pypy.org Sat Jul 20 14:10:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:17 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: More tests, checked to work on CPython's greenlet Message-ID: <20130720121017.355F71C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65511:76e6381f4a6f Date: 2013-07-19 20:30 +0200 http://bitbucket.org/pypy/pypy/changeset/76e6381f4a6f/ Log: More tests, checked to work on CPython's greenlet diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -341,3 +341,22 @@ assert main.switch(3, x=5) == ((3,), {'x': 5}) assert main.switch(3, x=5, y=6) == ((3,), {'x': 5, 'y': 6}) assert main.switch(2, 3, x=6) == ((2, 3), {'x': 6}) + + def test_throw_GreenletExit_not_started(self): + import greenlet + def f(): + never_executed + g = greenlet.greenlet(f) + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e + + def test_throw_GreenletExit_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + e = greenlet.GreenletExit() + x = g.throw(e) + assert x is e From noreply at buildbot.pypy.org Sat Jul 20 14:10:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:18 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Fix the first of the two tests Message-ID: <20130720121018.5BFCF1C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65512:94dd4b0432e7 Date: 2013-07-19 20:33 +0200 http://bitbucket.org/pypy/pypy/changeset/94dd4b0432e7/ Log: Fix the first of the two tests diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -147,5 +147,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) From noreply at buildbot.pypy.org Sat Jul 20 14:10:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:19 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Fix the second test. Message-ID: <20130720121019.CA75E1C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65513:1c74480193aa Date: 2013-07-19 20:44 +0200 http://bitbucket.org/pypy/pypy/changeset/1c74480193aa/ Log: Fix the second test. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -57,6 +57,7 @@ def __switch(target, methodname, *baseargs): current = getcurrent() + convert_greenletexit = True # while not (target.__main or _continulet.is_pending(target)): # inlined __nonzero__ ^^^ in case it's overridden @@ -75,6 +76,16 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw' and convert_greenletexit: + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + pass + convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) From noreply at buildbot.pypy.org Sat Jul 20 14:10:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:20 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Test for the "except:" path. Tests that the exception class is only Message-ID: <20130720121020.E9EB21C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65514:02f3dda99f30 Date: 2013-07-19 20:51 +0200 http://bitbucket.org/pypy/pypy/changeset/02f3dda99f30/ Log: Test for the "except:" path. Tests that the exception class is only instantiated once, and fix. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -84,8 +85,8 @@ methodname = 'switch' baseargs = (((e,), {}),) except: - pass - convert_greenletexit = False + baseargs = sys.exc_info()[:2] + baseargs[2:] + convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) diff --git a/pypy/module/test_lib_pypy/test_greenlet.py b/pypy/module/test_lib_pypy/test_greenlet.py --- a/pypy/module/test_lib_pypy/test_greenlet.py +++ b/pypy/module/test_lib_pypy/test_greenlet.py @@ -360,3 +360,21 @@ e = greenlet.GreenletExit() x = g.throw(e) assert x is e + + def test_throw_exception_already_finished(self): + import greenlet + def f(): + pass + g = greenlet.greenlet(f) + g.switch() + seen = [] + class MyException(Exception): + def __init__(self): + seen.append(1) + try: + g.throw(MyException) + except MyException: + pass + else: + raise AssertionError("no exception??") + assert seen == [1] From noreply at buildbot.pypy.org Sat Jul 20 14:10:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:22 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Kill this now-not-useful variable Message-ID: <20130720121022.197F31C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65515:ebaa755f76fa Date: 2013-07-19 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/ebaa755f76fa/ Log: Kill this now-not-useful variable diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -58,7 +58,6 @@ def __switch(target, methodname, *baseargs): current = getcurrent() - convert_greenletexit = True # while not (target.__main or _continulet.is_pending(target)): # inlined __nonzero__ ^^^ in case it's overridden @@ -78,7 +77,7 @@ # will show that the program is caught in this loop here.) 
target = target.parent # convert a "raise GreenletExit" into "return GreenletExit" - if methodname == 'throw' and convert_greenletexit: + if methodname == 'throw': try: raise baseargs[0], baseargs[1] except GreenletExit, e: @@ -86,7 +85,6 @@ baseargs = (((e,), {}),) except: baseargs = sys.exc_info()[:2] + baseargs[2:] - convert_greenletexit = False # try: unbound_method = getattr(_continulet, methodname) From noreply at buildbot.pypy.org Sat Jul 20 14:10:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 14:10:23 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: issue1552 resolved Message-ID: <20130720121023.49B5C1C01A0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65516:b33e576be3b6 Date: 2013-07-20 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/b33e576be3b6/ Log: issue1552 resolved Inherit GreenletExit from BaseException. diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -6,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" From noreply at buildbot.pypy.org Sat Jul 20 21:14:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 20 Jul 2013 21:14:59 +0200 (CEST) Subject: [pypy-commit] stmgc default: Kill this attempted test. I now belive that mark_private_from_protected() Message-ID: <20130720191459.296851C0130@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r428:0168e8f4f970 Date: 2013-07-20 19:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/0168e8f4f970/ Log: Kill this attempted test. I now belive that mark_private_from_protected() would always result in the young private_from_protected objects having the GCFLAG_WRITE_BARRIER eventually added. diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -225,8 +225,17 @@ assert(items[i]->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(items[i]->h_revision)); + /* if items[i] is young, move it, update the pointer, and + schedule the object for later consideration by + visit_all_outside_objects() (which will for example ensure + that the WRITE_BARRIER flag is added to it). + */ visit_if_young(&items[i]); + /* the backup copy is always allocated outside the nursery, + but we have to trace it as well, as it may contain its own + young pointers. + */ stmgc_trace((gcptr)items[i]->h_revision, &visit_if_young); } diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -200,42 +200,6 @@ check_not_free(p2) assert classify(p2) == "private" -def test_old_private_from_protected_to_young_private_2(): - py.test.skip("not valid") - p0 = nalloc_refs(1) - lib.stm_commit_transaction() - lib.stm_begin_inevitable_transaction() - lib.setptr(p0, 0, ffi.NULL) - assert classify(p0) == "private_from_protected" - assert lib.in_nursery(p0) # a young private_from_protected - # - lib.stm_push_root(p0) - minor_collect() - p0 = lib.stm_pop_root() - assert classify(p0) == "private_from_protected" - assert not lib.in_nursery(p0) # becomes an old private_from_protected - # - # Because it's a private_from_protected, its h_revision is a pointer - # to the backup copy, and not stm_private_rev_num. 
It means that the - # write barrier will always enter its slow path, even though the - # GCFLAG_WRITE_BARRIER is not set. - assert p0.h_revision != lib.get_private_rev_num() - assert not (p0.h_tid & GCFLAG_WRITE_BARRIER) - # - p1 = nalloc(HDR) - lib.setptr(p0, 0, p1) # should trigger the write barrier again - assert classify(p0) == "private_from_protected" - lib.stm_push_root(p0) - minor_collect() - p0b = lib.stm_pop_root() - assert p0b == p0 - check_nursery_free(p1) - assert classify(p0) == "private_from_protected" - p2 = lib.getptr(p0, 0) - assert not lib.in_nursery(p2) - check_not_free(p2) - assert classify(p2) == "private" - def test_old_private_from_protected_to_young_private_3(): p0 = palloc_refs(1) pw = lib.stm_write_barrier(p0) From noreply at buildbot.pypy.org Sat Jul 20 23:54:50 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 23:54:50 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: merge py3k Message-ID: <20130720215450.07E7E1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-memoryview Changeset: r65517:73caa44dd27b Date: 2013-07-19 22:00 -0700 http://bitbucket.org/pypy/pypy/changeset/73caa44dd27b/ Log: merge py3k diff too long, truncating to 2000 out of 13824 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/3/test/test_float.py b/lib-python/3/test/test_float.py --- a/lib-python/3/test/test_float.py +++ b/lib-python/3/test/test_float.py @@ -170,7 +170,8 @@ def test_float_containment(self): floats = (INF, -INF, 0.0, 1.0, NAN) for f in floats: - self.assertIn(f, [f]) + if f is NAN and support.check_impl_detail(pypy=False): + self.assertIn(f, [f]) self.assertIn(f, (f,)) self.assertIn(f, {f}) self.assertIn(f, {f: None}) diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -190,6 +190,8 @@ sys.setcheckinterval(n) self.assertEqual(sys.getcheckinterval(), n) + @unittest.skipUnless(hasattr(sys, 'setswitchinterval'), + 'The new GIL is an implementation detail') @unittest.skipUnless(threading, 'Threading required for this test.') def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) diff --git a/lib-python/3/test/test_threading.py b/lib-python/3/test/test_threading.py --- a/lib-python/3/test/test_threading.py +++ b/lib-python/3/test/test_threading.py @@ -346,10 +346,15 @@ # Try hard to trigger #1703448: a thread is still returned in # threading.enumerate() after it has been 
join()ed. enum = threading.enumerate - old_interval = sys.getswitchinterval() + newgil = hasattr(sys, 'getswitchinterval') + if newgil: + geti, seti = sys.getswitchinterval, sys.setswitchinterval + else: + geti, seti = sys.getcheckinterval, sys.setcheckinterval + old_interval = geti() try: for i in range(1, 100): - sys.setswitchinterval(i * 0.0002) + seti(i * 0.0002 if newgil else i // 5) t = threading.Thread(target=lambda: None) t.start() t.join() @@ -357,7 +362,7 @@ self.assertNotIn(t, l, "#1703448 triggered after %d trials: %s" % (i, l)) finally: - sys.setswitchinterval(old_interval) + seti(old_interval) @test.support.cpython_only def test_no_refcycle_through_target(self): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -16,8 +16,8 @@ from .app import TkApp -TK_VERSION = tkffi.string(tklib.get_tk_version()) -TCL_VERSION = tkffi.string(tklib.get_tcl_version()) +TK_VERSION = tkffi.string(tklib.get_tk_version()).decode('utf-8') +TCL_VERSION = tkffi.string(tklib.get_tcl_version()).decode('utf-8') READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE @@ -26,7 +26,7 @@ def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, sync=False, use=None): - return TkApp(screenName, baseName, className, + return TkApp(screenName, className, interactive, wantobjects, wantTk, sync, use) def _flatten(item): diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -9,15 +9,15 @@ def varname_converter(input): if isinstance(input, TclObject): return input.string - return input + return input.encode('utf-8') def Tcl_AppInit(app): if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: app.raiseTclError() skip_tk_init = tklib.Tcl_GetVar( - app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) - if skip_tk_init and tkffi.string(skip_tk_init) == "1": + app.interp, b"_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == b"1": return if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: @@ -38,7 +38,8 @@ self = tkffi.from_handle(clientData) assert self.app.interp == interp try: - args = [tkffi.string(arg) for arg in argv[1:argc]] + args = [tkffi.string(arg).decode('utf-8') + for arg in argv[1:argc]] result = self.func(*args) obj = AsObj(result) tklib.Tcl_SetObjResult(interp, obj) @@ -58,7 +59,7 @@ class TkApp(object): - def __new__(cls, screenName, baseName, className, + def __new__(cls, screenName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: raise NotImplementedError("wantobjects=True only") @@ -66,7 +67,7 @@ self.interp = tklib.Tcl_CreateInterp() self._wantobjects = wantobjects self.threaded = bool(tklib.Tcl_GetVar2Ex( - self.interp, "tcl_platform", "threaded", + self.interp, b"tcl_platform", b"threaded", tklib.TCL_GLOBAL_ONLY)) self.thread_id = tklib.Tcl_GetCurrentThread() self.dispatching = False @@ -77,26 +78,27 @@ self._commands = {} # Delete the 'exit' command, which can screw things up - tklib.Tcl_DeleteCommand(self.interp, "exit") + tklib.Tcl_DeleteCommand(self.interp, b"exit") if screenName is not None: - tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + tklib.Tcl_SetVar2(self.interp, b"env", b"DISPLAY", + screenName.encode('utf-8'), tklib.TCL_GLOBAL_ONLY) if interactive: - tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.Tcl_SetVar(self.interp, b"tcl_interactive", b"1", 
tklib.TCL_GLOBAL_ONLY) else: - tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.Tcl_SetVar(self.interp, b"tcl_interactive", b"0", tklib.TCL_GLOBAL_ONLY) # This is used to get the application class for Tk 4.1 and up - argv0 = className.lower() - tklib.Tcl_SetVar(self.interp, "argv0", argv0, + argv0 = className.lower().encode('utf-8') + tklib.Tcl_SetVar(self.interp, b"argv0", argv0, tklib.TCL_GLOBAL_ONLY) if not wantTk: - tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.Tcl_SetVar(self.interp, b"_tkinter_skip_tk_init", b"1", tklib.TCL_GLOBAL_ONLY) # some initial arguments need to be in argv @@ -123,8 +125,9 @@ def raiseTclError(self): if self.errorInCmd: self.errorInCmd = False - raise self.exc_info[0], self.exc_info[1], self.exc_info[2] - raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + raise self.exc_info[1].with_traceback(self.exc_info[2]) + raise TclError(tkffi.string( + tklib.Tcl_GetStringResult(self.interp)).decode('utf-8')) def wantobjects(self): return self._wantobjects @@ -135,11 +138,11 @@ def loadtk(self): # We want to guard against calling Tk_Init() multiple times - err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + err = tklib.Tcl_Eval(self.interp, b"info exists tk_version") if err == tklib.TCL_ERROR: self.raiseTclError() tk_exists = tklib.Tcl_GetStringResult(self.interp) - if not tk_exists or tkffi.string(tk_exists) != "1": + if not tk_exists or tkffi.string(tk_exists) != b"1": err = tklib.Tk_Init(self.interp) if err == tklib.TCL_ERROR: self.raiseTclError() @@ -220,7 +223,7 @@ raise NotImplementedError("Call from another thread") res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, + self.interp, cmdName.encode('utf-8'), _CommandData.PythonCmd, clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +232,7 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + res = tklib.Tcl_DeleteCommand(self.interp, cmdName.encode('utf-8')) if res == -1: raise TclError("can't delete Tcl command") @@ -280,17 +283,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) + res = tklib.Tcl_Eval(self.interp, script.encode('utf-8')) if res == tklib.TCL_ERROR: self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + result = tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + return result.decode('utf-8') def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) + res = tklib.Tcl_EvalFile(self.interp, filename.encode('utf-8')) if res == tklib.TCL_ERROR: self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + result = tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + return result.decode('utf-8') def split(self, arg): if isinstance(arg, tuple): @@ -301,7 +306,7 @@ def splitlist(self, arg): if isinstance(arg, tuple): return arg - if isinstance(arg, unicode): + if isinstance(arg, str): arg = arg.encode('utf8') argc = tkffi.new("int*") @@ -310,7 +315,7 @@ if res == tklib.TCL_ERROR: self.raiseTclError() - result = tuple(tkffi.string(argv[0][i]) + result = tuple(tkffi.string(argv[0][i]).decode('utf-8') for i in range(argc[0])) tklib.Tcl_Free(argv[0]) return result @@ -326,7 +331,7 @@ for elem, newelem in zip(arg, newelems): if elem is not newelem: return newelems - elif 
isinstance(arg, str): + elif isinstance(arg, bytes): argc = tkffi.new("int*") argv = tkffi.new("char***") res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) @@ -345,7 +350,7 @@ # Not a list. # Could be a quoted string containing funnies, e.g. {"}. # Return the string itself. - return arg + return arg.decode('utf-8') try: if argc[0] == 0: @@ -361,6 +366,7 @@ def getboolean(self, s): if isinstance(s, int): return s + s = s.encode('utf-8') v = tkffi.new("int*") res = tklib.Tcl_GetBoolean(self.interp, s, v) if res == tklib.TCL_ERROR: @@ -383,7 +389,7 @@ self.quitMainLoop = False if self.errorInCmd: self.errorInCmd = False - raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise self.exc_info[1].with_traceback(self.exc_info[2]) def quit(self): self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -4,13 +4,13 @@ class TypeCache(object): def __init__(self): - self.BooleanType = tklib.Tcl_GetObjType("boolean") - self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") - self.DoubleType = tklib.Tcl_GetObjType("double") - self.IntType = tklib.Tcl_GetObjType("int") - self.ListType = tklib.Tcl_GetObjType("list") - self.ProcBodyType = tklib.Tcl_GetObjType("procbody") - self.StringType = tklib.Tcl_GetObjType("string") + self.BooleanType = tklib.Tcl_GetObjType(b"boolean") + self.ByteArrayType = tklib.Tcl_GetObjType(b"bytearray") + self.DoubleType = tklib.Tcl_GetObjType(b"double") + self.IntType = tklib.Tcl_GetObjType(b"int") + self.ListType = tklib.Tcl_GetObjType(b"list") + self.ProcBodyType = tklib.Tcl_GetObjType(b"procbody") + self.StringType = tklib.Tcl_GetObjType(b"string") def FromObj(app, value): @@ -18,13 +18,7 @@ typeCache = app._typeCache if not value.typePtr: buf = tkffi.buffer(value.bytes, value.length) - result = buf[:] - # If the result contains any bytes with the top bit set, it's - # UTF-8 and we should decode it to Unicode. - try: - result.decode('ascii') - except UnicodeDecodeError: - result = result.decode('utf8') + result = buf[:].decode('utf8') return result elif value.typePtr == typeCache.BooleanType: @@ -60,12 +54,15 @@ return TclObject(value) def AsObj(value): - if isinstance(value, str): + if isinstance(value, bytes): return tklib.Tcl_NewStringObj(value, len(value)) elif isinstance(value, bool): return tklib.Tcl_NewBooleanObj(value) elif isinstance(value, int): - return tklib.Tcl_NewLongObj(value) + try: + return tklib.Tcl_NewLongObj(value) + except OverflowError: + pass # and fall through to default object handling. 
elif isinstance(value, float): return tklib.Tcl_NewDoubleObj(value) elif isinstance(value, tuple): @@ -73,16 +70,16 @@ for i in range(len(value)): argv[i] = AsObj(value[i]) return tklib.Tcl_NewListObj(len(value), argv) - elif isinstance(value, unicode): + elif isinstance(value, str): encoded = value.encode('utf-16')[2:] buf = tkffi.new("char[]", encoded) inbuf = tkffi.cast("Tcl_UniChar*", buf) - return tklib.Tcl_NewUnicodeObj(buf, len(encoded)/2) + return tklib.Tcl_NewUnicodeObj(buf, len(encoded)//2) elif isinstance(value, TclObject): tklib.Tcl_IncrRefCount(value._value) return value._value - else: - return AsObj(str(value)) + + return AsObj(str(value)) class TclObject(object): def __new__(cls, value): @@ -98,17 +95,15 @@ def __str__(self): if self._string and isinstance(self._string, str): return self._string - return tkffi.string(tklib.Tcl_GetString(self._value)) + return tkffi.string(tklib.Tcl_GetString(self._value)).decode('utf-8') @property def string(self): + "the string representation of this object, either as str or bytes" if self._string is None: length = tkffi.new("int*") s = tklib.Tcl_GetStringFromObj(self._value, length) value = tkffi.buffer(s, length[0])[:] - try: - value.decode('ascii') - except UnicodeDecodeError: - value = value.decode('utf8') + value = value.decode('utf8') self._string = value return self._string diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. -""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,5 @@ +import sys +import __pypy__ import _continuation __version__ = "0.4.0" @@ -46,16 +48,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." 
- return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +67,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -75,14 +77,27 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise __pypy__.normalize_exc(baseargs[0], baseargs[1]) + except GreenletExit as e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,18 +144,22 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit as e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: - raise value.with_traceback(tb) + raise __pypy__.normalize_exc(exc, value, tb) + except GreenletExit as e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -9,6 +9,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -24,32 +25,12 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group(metaclass=_structseq.structseqtype): - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... 
+ gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -72,10 +53,10 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(os.fsdecode(res.contents.gr_name), - os.fsdecode(res.contents.gr_passwd), - res.contents.gr_gid, - mem) + return struct_group((os.fsdecode(res.contents.gr_name), + os.fsdecode(res.contents.gr_passwd), + res.contents.gr_gid, + mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. +import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -55,6 +55,11 @@ else: return c +if 'a'[0] == 'a': + # When running tests with python2, bytes characters are bytes. + def _my_unctrl(c, uc=_my_unctrl): + return uc(ord(c)) + def disp_str(buffer, join=''.join, uc=_my_unctrl): """ disp_str(buffer:string) -> (string, [int]) @@ -519,7 +524,7 @@ def do_cmd(self, cmd): #print cmd - if isinstance(cmd[0], str): + if isinstance(cmd[0], (str, unicode)): cmd = self.commands.get(cmd[0], commands.invalid_command)(self, *cmd) elif isinstance(cmd[0], type): diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -1,3 +1,4 @@ +# this cffi version was rewritten based on the # ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides an interface to the Unix syslog library routines. @@ -9,34 +10,84 @@ if sys.platform == 'win32': raise ImportError("No syslog on Windows") -# load the platform-specific cache made by running syslog.ctc.py -from ctypes_config_cache._syslog_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes import c_int, c_char_p +from cffi import FFI try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +ffi = FFI() -# Real prototype is: -# void syslog(int priority, const char *format, ...); -# But we also need format ("%s") and one format argument (message) -_syslog = libc.syslog -_syslog.argtypes = (c_int, c_char_p, c_char_p) -_syslog.restype = None +ffi.cdef(""" +/* mandatory constants */ +#define LOG_EMERG ... +#define LOG_ALERT ... +#define LOG_CRIT ... +#define LOG_ERR ... +#define LOG_WARNING ... +#define LOG_NOTICE ... +#define LOG_INFO ... +#define LOG_DEBUG ... -_openlog = libc.openlog -_openlog.argtypes = (c_char_p, c_int, c_int) -_openlog.restype = None +#define LOG_PID ... +#define LOG_CONS ... +#define LOG_NDELAY ... -_closelog = libc.closelog -_closelog.argtypes = None -_closelog.restype = None +#define LOG_KERN ... +#define LOG_USER ... +#define LOG_MAIL ... +#define LOG_DAEMON ... +#define LOG_AUTH ... +#define LOG_LPR ... +#define LOG_LOCAL0 ... +#define LOG_LOCAL1 ... +#define LOG_LOCAL2 ... 
+#define LOG_LOCAL3 ... +#define LOG_LOCAL4 ... +#define LOG_LOCAL5 ... +#define LOG_LOCAL6 ... +#define LOG_LOCAL7 ... -_setlogmask = libc.setlogmask -_setlogmask.argtypes = (c_int,) -_setlogmask.restype = c_int +/* optional constants, gets defined to -919919 if missing */ +#define LOG_NOWAIT ... +#define LOG_PERROR ... + +/* aliased constants, gets defined as some other constant if missing */ +#define LOG_SYSLOG ... +#define LOG_CRON ... +#define LOG_UUCP ... +#define LOG_NEWS ... + +/* functions */ +void openlog(const char *ident, int option, int facility); +void syslog(int priority, const char *format, const char *string); +// NB. the signature of syslog() is specialized to the only case we use +void closelog(void); +int setlogmask(int mask); +""") + +lib = ffi.verify(""" +#include + +#ifndef LOG_NOWAIT +#define LOG_NOWAIT -919919 +#endif +#ifndef LOG_PERROR +#define LOG_PERROR -919919 +#endif +#ifndef LOG_SYSLOG +#define LOG_SYSLOG LOG_DAEMON +#endif +#ifndef LOG_CRON +#define LOG_CRON LOG_DAEMON +#endif +#ifndef LOG_UUCP +#define LOG_UUCP LOG_MAIL +#endif +#ifndef LOG_NEWS +#define LOG_NEWS LOG_MAIL +#endif +""") + _S_log_open = False _S_ident_o = None @@ -52,17 +103,19 @@ return None @builtinify -def openlog(ident=None, logoption=0, facility=LOG_USER): +def openlog(ident=None, logoption=0, facility=lib.LOG_USER): global _S_ident_o, _S_log_open if ident is None: ident = _get_argv() - if ident is not None: + if ident is None: + _S_ident_o = ffi.NULL + else: if not isinstance(ident, str): msg = "openlog() argument 1 must be a str, not {!r}" raise TypeError(msg.format(type(ident).__name__)) ident = ident.encode(sys.getdefaultencoding()) - _S_ident_o = c_char_p(ident) # keepalive - _openlog(_S_ident_o, logoption, facility) + _S_ident_o = ffi.new("char[]", ident) # keepalive + lib.openlog(_S_ident_o, logoption, facility) _S_log_open = True @builtinify @@ -78,19 +131,19 @@ raise TypeError("syslog() message must be a str, not {!r}".format( type(message).__name__)) message = message.encode(sys.getdefaultencoding()) - _syslog(priority, b"%s", message) + lib.syslog(priority, b"%s", message) @builtinify def closelog(): global _S_log_open, S_ident_o if _S_log_open: - _closelog() + lib.closelog() _S_log_open = False _S_ident_o = None @builtinify def setlogmask(mask): - return _setlogmask(mask) + return lib.setlogmask(mask) @builtinify def LOG_MASK(pri): @@ -100,8 +153,15 @@ def LOG_UPTO(pri): return (1 << (pri + 1)) - 1 -__all__ = ALL_CONSTANTS + ( +__all__ = [] + +for name in sorted(lib.__dict__): + if name.startswith('LOG_'): + value = getattr(lib, name) + if value != -919919: + globals()[name] = value + __all__.append(name) + +__all__ = tuple(__all__) + ( 'openlog', 'syslog', 'closelog', 'setlogmask', 'LOG_MASK', 'LOG_UPTO') - -del ALL_CONSTANTS diff --git a/pypy/TODO b/pypy/TODO --- a/pypy/TODO +++ b/pypy/TODO @@ -1,28 +1,8 @@ TODO for the python3 test suite: -* test_float - nan = float('nan'); assert nan in [nan] - This has always been true in CPython, it is now guaranteed that the - containers use the "is" operator as an optimization. - Difficult in pypy because optimized containers are arrays of - unwrapped doubles. A possible solution is to special-case nan in - FloatListStrategy.unwrap(). - * test_memoryview Needs bytes/str changes. Probably easy. Work for this has begun on - py3k-memoryview (by mjacob) - -* test_pep263 - Tracebacks should be able to print unicode source code. This is - really due to the tokenizer not being fully unicode aware. 
The - parser can somewhat hack around this but maybe not completely - -* test_sys -* test_threading: - Missing sys.getswitchinterval(). https://bugs.pypy.org/issue1470 - We would be interesting to implement the new thread switching - logic, it's a lot of work though. - + py3k-memoryview (by mjacob) https://bugs.pypy.org/issue1542 own-tests: @@ -34,30 +14,22 @@ structseq now subclasses tuple on py3, which breaks how BaseCpyTypeDescr.realize allocates it -* module.marshal.test.test_marshal - Handling of exceptions w/ bad data? Or is the test wrong? - -* objspace.std.test.test_floatobject test_from_string - The unicode-based number parsing routines don't raise UnicodeErrors, - but more importantly they raise unprintable exceptions - antocuni's older TODO: -run coverage against the parser/astbuilder/astcompiler: it's probably full of +* run coverage against the parser/astbuilder/astcompiler: it's probably full of dead code because the grammar changed -re-enable IntDictStrategy +* re-enable strategies https://bugs.pypy.org/issue1540 : + - re-enable IntDictStrategy + - re-enable StdObjSpace.listview_str + - re-enable the kwargs dict strategy in dictmultiobject.py + - re-enable view_as_kwargs -re-enable StdObjSpace.listview_str +* unskip numpypy tests in module/test_lib_pypy/numpypy/ -re-enable the kwargs dict strategy in dictmultiobject.py -re-enable view_as_kwargs - -unskip numpypy tests in module/test_lib_pypy/numpypy/ - -optimize W_UnicodeObject, right now it stores both an unicode and an utf8 +* optimize W_UnicodeObject, right now it stores both an unicode and an utf8 version of the same string -re-enable BUILD_LIST_FROM_ARG: see the comment in astcompiler/codegen.py in +* re-enable BUILD_LIST_FROM_ARG: see the comment in astcompiler/codegen.py in ast.ListComp.build_container diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,7 +36,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_ffi", "_continuation", "_csv", "_cffi_backend", - "_posixsubprocess", # "cppyy", "micronumpy", + "_posixsubprocess", "_pypyjson", # "cppyy", "micronumpy", ] )) @@ -147,7 +147,7 @@ requires=module_dependencies.get(modname, []), suggests=module_suggests.get(modname, []), negation=modname not in essential_modules, - validator=get_module_validator(modname)) + ) #validator=get_module_validator(modname)) for modname in all_modules]), BoolOption("allworkingmodules", "use as many working modules as possible", diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. 
+Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). 
+ +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel From noreply at buildbot.pypy.org Sat Jul 20 23:54:51 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 23:54:51 +0200 (CEST) Subject: [pypy-commit] pypy py3k: linux needs these additionally Message-ID: <20130720215451.5F9321C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65518:7dc13d67886b Date: 2013-07-20 14:45 -0700 http://bitbucket.org/pypy/pypy/changeset/7dc13d67886b/ Log: linux needs these additionally diff --git a/pypy/module/test_lib_pypy/test_pwd.py b/pypy/module/test_lib_pypy/test_pwd.py --- a/pypy/module/test_lib_pypy/test_pwd.py +++ b/pypy/module/test_lib_pypy/test_pwd.py @@ -1,7 +1,8 @@ import py, sys class AppTestPwd: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools', 'binascii')) + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools', 'binascii', + 'fcntl', 'select', 'signal')) def setup_class(cls): if sys.platform == 'win32': From noreply at buildbot.pypy.org Sat Jul 20 23:54:52 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 20 Jul 2013 23:54:52 +0200 (CEST) Subject: [pypy-commit] pypy py3k: py3's json operates on unicode/rejects bytes Message-ID: <20130720215452.A084D1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65519:8ba70e2a8eb9 Date: 2013-07-20 14:47 -0700 http://bitbucket.org/pypy/pypy/changeset/8ba70e2a8eb9/ Log: py3's json operates on unicode/rejects bytes diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -387,9 +387,9 @@ return 0x10000 + (((highsurr - 0xd800) << 10) | (lowsurr - 0xdc00)) def loads(space, w_s): - if space.isinstance_w(w_s, space.w_unicode): - raise OperationError(space.w_TypeError, - space.wrap("Expected utf8-encoded str, got unicode")) + if space.isinstance_w(w_s, space.w_bytes): + raise operationerrfmt(space.w_TypeError, + "Expected string, got %T", w_s) s = space.str_w(w_s) decoder = JSONDecoder(space, s) try: diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -16,9 +16,9 @@ class AppTest(object): spaceconfig = {"objspace.usemodules._pypyjson": True} - def 
test_raise_on_unicode(self): + def test_raise_on_bytes(self): import _pypyjson - raises(TypeError, _pypyjson.loads, "42") + raises(TypeError, _pypyjson.loads, b"42") def test_decode_constants(self): From noreply at buildbot.pypy.org Sun Jul 21 09:32:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 21 Jul 2013 09:32:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a comment that finally settles my mind Message-ID: <20130721073208.B4B491C301A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r429:d633d35aa61e Date: 2013-07-21 09:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/d633d35aa61e/ Log: Add a comment that finally settles my mind diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -235,6 +235,12 @@ /* the backup copy is always allocated outside the nursery, but we have to trace it as well, as it may contain its own young pointers. + + but only once: if the transaction was running for long enough + to have num_private_from_protected_known_old > 0, then the + backup copies of known-old objects have already been traced + in a previous minor collection, and as they are read-only, + they cannot contain young pointers any more. */ stmgc_trace((gcptr)items[i]->h_revision, &visit_if_young); } From noreply at buildbot.pypy.org Sun Jul 21 22:28:04 2013 From: noreply at buildbot.pypy.org (liquibits) Date: Sun, 21 Jul 2013 22:28:04 +0200 (CEST) Subject: [pypy-commit] pypy default: docs typos Message-ID: <20130721202804.B1E621C0162@cobra.cs.uni-duesseldorf.de> Author: Pawe? Piotr Przeradowski Branch: Changeset: r65520:e9c81df4f219 Date: 2013-07-21 22:22 +0200 http://bitbucket.org/pypy/pypy/changeset/e9c81df4f219/ Log: docs typos diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written From noreply at buildbot.pypy.org Sun Jul 21 22:40:07 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 21 Jul 2013 22:40:07 +0200 (CEST) Subject: [pypy-commit] pypy py3k: more usemodules for linux Message-ID: <20130721204007.E0CB71C0162@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65521:1dd791746a4e Date: 2013-07-21 13:39 -0700 http://bitbucket.org/pypy/pypy/changeset/1dd791746a4e/ Log: more usemodules for linux diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -3,7 +3,8 @@ class AppTestGrp: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'fcntl', 'itertools', + 'select', 'signal')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -10,7 +10,8 @@ class AppTestOsWait: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'fcntl', 'itertools', + 'select', 'signal')) def setup_class(cls): if not hasattr(os, "fork"): diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -6,7 +6,8 @@ class AppTestResource: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'fcntl', 'itertools', + 'select', 'signal')) def setup_class(cls): rebuild.rebuild_one('resource.ctc.py') From noreply at buildbot.pypy.org Mon Jul 22 09:06:50 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 22 Jul 2013 09:06:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: repeat write barriers after a possible minor collection Message-ID: <20130722070650.4676D1C07B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65522:a9400c2707e9 Date: 2013-07-22 08:31 +0200 http://bitbucket.org/pypy/pypy/changeset/a9400c2707e9/ Log: repeat write barriers after a possible minor collection diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -70,6 +70,8 @@ continue # ---------- mallocs ---------- if op.is_malloc(): + # write barriers not valid after possible collection + self.write_to_read_categories() self.handle_malloc_operation(op) continue # ---------- calls ---------- @@ -107,7 +109,11 @@ # return self.newops - + def write_to_read_categories(self): + for v, c in self.known_category.items(): + if c == 'W': + self.known_category[v] = 'R' + def gen_write_barrier(self, v): raise NotImplementedError diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -64,6 +64,43 @@ jump() """, t=NULL) + def test_rewrite_write_barrier_after_malloc(self): + self.check_rewrite(""" + [p1, p3] + setfield_gc(p3, p1, 
descr=tzdescr) + p2 = new(descr=tdescr) + setfield_gc(p3, p1, descr=tzdescr) + jump(p2) + """, """ + [p1, p3] + cond_call_stm_b(p3, descr=P2Wdescr) + setfield_gc(p3, p1, descr=tzdescr) + p2 = call_malloc_gc(ConstClass(malloc_big_fixedsize), \ + %(tdescr.size)d, %(tdescr.tid)d, \ + descr=malloc_big_fixedsize_descr) + cond_call_stm_b(p3, descr=P2Wdescr) + setfield_gc(p3, p1, descr=tzdescr) + jump(p2) + """) + + def test_rewrite_read_barrier_after_malloc(self): + self.check_rewrite(""" + [p1] + p2 = getfield_gc(p1, descr=tzdescr) + p3 = new(descr=tdescr) + p4 = getfield_gc(p1, descr=tzdescr) + jump(p2) + """, """ + [p1] + cond_call_stm_b(p1, descr=P2Rdescr) + p2 = getfield_gc(p1, descr=tzdescr) + p3 = call_malloc_gc(ConstClass(malloc_big_fixedsize), \ + %(tdescr.size)d, %(tdescr.tid)d, \ + descr=malloc_big_fixedsize_descr) + p4 = getfield_gc(p1, descr=tzdescr) + jump(p2) + """) + def test_rewrite_setfield_gc_on_local(self): self.check_rewrite(""" [p1] From noreply at buildbot.pypy.org Mon Jul 22 09:06:51 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 22 Jul 2013 09:06:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: repeat writebarrier after possible minor collect here too Message-ID: <20130722070651.8D7AD1C07B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65523:fd2553f4cae2 Date: 2013-07-22 08:54 +0200 http://bitbucket.org/pypy/pypy/changeset/fd2553f4cae2/ Log: repeat writebarrier after possible minor collect here too diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -70,6 +70,33 @@ assert len(self.writemode) == 1 assert self.barriers == [] + def test_repeat_write_barrier_after_malloc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 6 + def f1(n): + x1.foo = n + lltype.malloc(X) + x1.foo = x1.foo + n + + self.interpret(f1, [4]) + assert len(self.writemode) == 2 + assert self.barriers == ['G2W', 'r2w'] + + def test_repeat_read_barrier_after_malloc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 6 + def f1(n): + i = x1.foo + lltype.malloc(X) + i = x1.foo + i + return i + + self.interpret(f1, [4]) + assert len(self.writemode) == 1 + assert self.barriers == ['G2R'] + def test_write_may_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) def f1(p, q): diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -150,6 +150,11 @@ category[v] = 'O' # if op.opname in MALLOCS: + # write barriers after a possible minor collection + # are not valid anymore: + for v, c in category.items(): + if c == 'W': + category[v] = 'R' category[op.result] = 'W' block.operations = newoperations From noreply at buildbot.pypy.org Mon Jul 22 09:06:52 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 22 Jul 2013 09:06:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: merge (I should pull before doing things..) Message-ID: <20130722070652.EFA701C07B6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65524:b8c3b478c594 Date: 2013-07-22 09:01 +0200 http://bitbucket.org/pypy/pypy/changeset/b8c3b478c594/ Log: merge (I should pull before doing things..) 
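
The stmgc-c4 changesets above and the merge below all maintain the same invariant: the rewriter remembers, per pointer variable, whether a read or write barrier has already been emitted ('P' plain pointer, 'R' read-ready, 'W' write-ready), and any operation that may trigger a minor collection invalidates the write-ready state. A rough sketch of that bookkeeping follows; the NEEDS_BARRIER table is the one from writebarrier.py, while the toy operation encoding and driver loop are invented purely for illustration and are not the actual rewriter:

    NEEDS_BARRIER = {
        ('P', 'R'): True,    # plain pointer: a read barrier is needed
        ('P', 'W'): True,
        ('R', 'R'): False,   # already read-ready
        ('R', 'W'): True,
        ('W', 'R'): False,   # write-ready implies read-ready
        ('W', 'W'): False,
    }

    def insert_barriers(ops):
        category = {}              # variable -> 'P', 'R' or 'W'
        newops = []
        for kind, v in ops:        # ops like ('read', 'p1'), ('write', 'p1'), ('malloc', 'p2')
            if kind == 'malloc':
                # a malloc can trigger a minor collection, which invalidates
                # previously emitted write barriers: demote 'W' back to 'R'
                for var, cat in category.items():
                    if cat == 'W':
                        category[var] = 'R'
                category[v] = 'W'  # the freshly allocated object is write-ready
            else:
                to = 'R' if kind == 'read' else 'W'
                frm = category.get(v, 'P')
                if NEEDS_BARRIER[frm, to]:
                    newops.append(('stm_barrier', '%s2%s' % (frm, to), v))
                    category[v] = to
            newops.append((kind, v))
        return newops

    # insert_barriers([('write', 'p3'), ('malloc', 'p2'), ('write', 'p3')])
    # emits a P2W barrier before the first write and an R2W barrier again
    # after the malloc, which is exactly the pattern checked by the new
    # test_repeat_write_barrier_after_malloc tests.
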
diff --git a/rpython/translator/stm/test/test_writebarrier.py b/rpython/translator/stm/test/test_writebarrier.py --- a/rpython/translator/stm/test/test_writebarrier.py +++ b/rpython/translator/stm/test/test_writebarrier.py @@ -24,7 +24,7 @@ res = self.interpret(f1, [-5]) assert res == 42 assert len(self.writemode) == 0 - assert self.barriers == ['G2R'] + assert self.barriers == ['P2R'] def test_simple_write(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -37,7 +37,7 @@ self.interpret(f1, [4]) assert x1.foo == 4 assert len(self.writemode) == 1 - assert self.barriers == ['G2W'] + assert self.barriers == ['P2W'] def test_multiple_reads(self): X = lltype.GcStruct('X', ('foo', lltype.Signed), @@ -58,7 +58,7 @@ res = self.interpret(f1, [4]) assert res == -81 assert len(self.writemode) == 0 - assert self.barriers == ['G2R'] + assert self.barriers == ['P2R'] def test_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -81,7 +81,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 2 - assert self.barriers == ['G2W', 'r2w'] + assert self.barriers == ['P2W', 'r2w'] def test_repeat_read_barrier_after_malloc(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -95,7 +95,7 @@ self.interpret(f1, [4]) assert len(self.writemode) == 1 - assert self.barriers == ['G2R'] + assert self.barriers == ['P2R'] def test_write_may_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -109,10 +109,10 @@ y = lltype.malloc(X, immortal=True) res = self.interpret(f1, [x, y]) assert res == 36 - assert self.barriers == ['P2R', 'P2W', 'o2r'] + assert self.barriers == ['P2R', 'P2W', 'p2r'] res = self.interpret(f1, [x, x]) assert res == 42 - assert self.barriers == ['P2R', 'P2W', 'O2R'] + assert self.barriers == ['P2R', 'P2W', 'P2R'] def test_write_cannot_alias(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) @@ -254,18 +254,31 @@ x.foo = 815 x.zbar = 'A' external_stuff() - result = x.foo - if isinstance(x, Y): - result += x.ybar + result = x.foo # 1 + if isinstance(x, Y): # 2 + result += x.ybar # 3 return result res = self.interpret(f1, [10]) assert res == 42 + 10 - assert self.barriers == ['p2r', 'p2r'] # from two blocks (could be - # optimized later) + assert self.barriers == ['p2r', 'p2r', 'p2r'] # from 3 blocks (could be + # optimized later) res = self.interpret(f1, [-10]) assert res == 815 - assert self.barriers == ['p2r'] + assert self.barriers == ['p2r', 'p2r'] + + def test_write_barrier_repeated(self): + class X: + pass + x = X() + def f1(i): + x.a = i # write barrier + y = X() # malloc + x.a += 1 # write barrier again + return y + + res = self.interpret(f1, [10]) + assert self.barriers == ['P2W', 'r2w'] external_stuff = rffi.llexternal('external_stuff', [], lltype.Void, diff --git a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -2,7 +2,7 @@ from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache from rpython.translator.stm.transform import STMTransformer -from rpython.translator.stm.writebarrier import MORE_PRECISE_CATEGORIES +from rpython.translator.stm.writebarrier import NEEDS_BARRIER from rpython.conftest import option @@ -29,13 +29,13 @@ self.writemode = set() self.barriers = [] - def get_category(self, p): + def get_category_or_null(self, p): if isinstance(p, _stmptr): return p._category if not p: return 'N' if p._solid: - return 
'G' # allocated with immortal=True + return 'P' # allocated with immortal=True raise AssertionError("unknown category on %r" % (p,)) def interpret(self, fn, args): @@ -71,19 +71,19 @@ if isinstance(value, _stmptr): yield value - def get_category(self, p): - return self.llinterpreter.tester.get_category(p) + def get_category_or_null(self, p): + return self.llinterpreter.tester.get_category_or_null(p) def check_category(self, p, expected): - cat = self.get_category(p) - assert cat in MORE_PRECISE_CATEGORIES[expected] + cat = self.get_category_or_null(p) + assert cat in 'NPRW' return cat def op_stm_barrier(self, kind, obj): frm, middledigit, to = kind assert middledigit == '2' cat = self.check_category(obj, frm) - if cat in MORE_PRECISE_CATEGORIES[to]: + if not NEEDS_BARRIER[cat, to]: # a barrier, but with no effect self.llinterpreter.tester.barriers.append(kind.lower()) return obj @@ -109,10 +109,10 @@ def op_setfield(self, obj, fieldname, fieldvalue): if not obj._TYPE.TO._immutable_field(fieldname): self.check_category(obj, 'W') - # convert R -> O all other pointers to the same object we can find + # convert R -> P all other pointers to the same object we can find for p in self.all_stm_ptrs(): if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'O') + _stmptr._category.__set__(p, 'P') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -4,6 +4,7 @@ from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.stm.threadlocalref import transform_tlref from rpython.translator.c.support import log +from rpython.memory.gctransform.framework import CollectAnalyzer class STMTransformer(object): @@ -26,9 +27,11 @@ def transform_write_barrier(self): self.write_analyzer = WriteAnalyzer(self.translator) + self.collect_analyzer = CollectAnalyzer(self.translator) for graph in self.translator.graphs: insert_stm_barrier(self, graph) del self.write_analyzer + del self.collect_analyzer def transform_turn_inevitable(self): for graph in self.translator.graphs: diff --git a/rpython/translator/stm/writebarrier.py b/rpython/translator/stm/writebarrier.py --- a/rpython/translator/stm/writebarrier.py +++ b/rpython/translator/stm/writebarrier.py @@ -9,14 +9,14 @@ 'malloc_nonmovable', 'malloc_nonmovable_varsize', ]) -MORE_PRECISE_CATEGORIES = { - 'P': 'PGORLWN', # Pointer: the most general category - 'G': 'GN', # Global: known to be a non-local pointer - 'O': 'ORLWN', # Old: used to be read-ready, but maybe someone wrote - 'R': 'RLWN', # Read-ready: direct reads from there are ok - 'L': 'LWN', # Local: a local pointer - 'W': 'WN', # Write-ready: direct writes here are ok - 'N': 'N'} # NULL (the other categories also all contain NULL) +NEEDS_BARRIER = { + ('P', 'R'): True, + ('P', 'W'): True, + ('R', 'R'): False, + ('R', 'W'): True, + ('W', 'R'): False, + ('W', 'W'): False, + } def unwraplist(list_v): for v in list_v: @@ -44,14 +44,20 @@ def insert_stm_barrier(stmtransformer, graph): + """This function uses the following characters for 'categories': + + * 'P': a general pointer + * 'R': the read barrier was applied + * 'W': the write barrier was applied + """ graphinfo = stmtransformer.write_analyzer.compute_graph_info(graph) def get_category(v): - if isinstance(v, Constant): - if v.value: - return 'G' - else: - return 'N' # 
NULL + return category.get(v, 'P') + + def get_category_or_null(v): + if isinstance(v, Constant) and not v.value: + return 'N' return category.get(v, 'P') def renamings_get(v): @@ -82,7 +88,7 @@ op.result.concretetype is not lltype.Void and op.args[0].concretetype.TO._gckind == 'gc' and True): #not is_immutable(op)): XXX see [1] - wants_a_barrier.setdefault(op, 'R') + wants_a_barrier[op] = 'R' elif (op.opname in ('setfield', 'setarrayitem', 'setinteriorfield') and op.args[-1].concretetype is not lltype.Void and @@ -113,7 +119,7 @@ v_holder = renamings.setdefault(v, [v]) v = v_holder[0] frm = get_category(v) - if frm not in MORE_PRECISE_CATEGORIES[to]: + if NEEDS_BARRIER[frm, to]: c_info = Constant('%s2%s' % (frm, to), lltype.Void) w = varoftype(v.concretetype) newop = SpaceOperation('stm_barrier', [c_info, v], w) @@ -127,9 +133,9 @@ newoperations.append(newop) # if op in expand_comparison: - cats = ''.join([get_category(v) for v in newop.args]) - if ('N' not in cats and - cats not in ('LL', 'LW', 'WL', 'WW')): + cats = (get_category_or_null(newop.args[0]), + get_category_or_null(newop.args[1])) + if 'N' not in cats and cats != ('W', 'W'): if newop.opname == 'ptr_ne': v = varoftype(lltype.Bool) negop = SpaceOperation('bool_not', [v], @@ -137,24 +143,33 @@ newoperations.append(negop) newop.result = v newop.opname = 'stm_ptr_eq' - # + + if stmtransformer.collect_analyzer.analyze(op): + # this operation can collect: we bring all 'W' + # categories back to 'R', because we would need + # another stm_write_barrier on them afterwards + for v, cat in category.items(): + if cat == 'W': + category[v] = 'R' + effectinfo = stmtransformer.write_analyzer.analyze( op, graphinfo=graphinfo) if effectinfo: if effectinfo is top_set: - category.clear() + # this operation can perform random writes: any + # 'R'-category object falls back to 'P' because + # we would need another stm_read_barrier() + for v, cat in category.items(): + if cat == 'R': + category[v] = 'P' else: + # the same, but only on objects of the right types types = set([entry[1] for entry in effectinfo]) for v in category.keys(): if v.concretetype in types and category[v] == 'R': - category[v] = 'O' - # + category[v] = 'P' + if op.opname in MALLOCS: - # write barriers after a possible minor collection - # are not valid anymore: - for v, c in category.items(): - if c == 'W': - category[v] = 'R' category[op.result] = 'W' block.operations = newoperations From noreply at buildbot.pypy.org Mon Jul 22 17:56:40 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 22 Jul 2013 17:56:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: import weakref branch Message-ID: <20130722155640.714C51C04B4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65525:a4260ea734e1 Date: 2013-07-22 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/a4260ea734e1/ Log: import weakref branch diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -7,6 +7,29 @@ */ #include "stmimpl.h" +#ifdef _GC_DEBUG +char tmp_buf[128]; +char* stm_dbg_get_hdr_str(gcptr obj) +{ + char *cur; + char *flags[] = GC_FLAG_NAMES; + int i; + + i = 0; + cur = tmp_buf; + cur += sprintf(cur, "%p:", obj); + while (flags[i]) { + if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { + cur += sprintf(cur, "%s|", flags[i]); + } + i++; + } + cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); + return tmp_buf; +} +#endif + + __thread struct tx_descriptor 
*thread_descriptor = NULL; @@ -546,6 +569,7 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into @@ -1092,7 +1116,7 @@ #endif L->h_revision = new_revision; - gcptr stub = stm_stub_malloc(d->public_descriptor); + gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -73,6 +73,8 @@ static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; +static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; + /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -90,6 +92,8 @@ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ + "HAS_ID", \ + "IMMUTABLE", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ @@ -197,4 +201,7 @@ void DescriptorInit(void); void DescriptorDone(void); +#ifdef _GC_DEBUG +char* stm_dbg_get_hdr_str(gcptr obj); +#endif #endif /* _ET_H */ diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -4,7 +4,7 @@ void stm_copy_to_old_id_copy(gcptr obj, gcptr id) { - //assert(!is_in_nursery(thread_descriptor, id)); + //assert(!stmgc_is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); size_t size = stmgc_size(obj); @@ -108,10 +108,12 @@ else { /* must create shadow original object XXX: or use backup, if exists */ - - /* XXX use stmgcpage_malloc() directly, we don't need to copy - * the contents yet */ - gcptr O = stmgc_duplicate_old(p); + gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); + memcpy(O, p, stmgc_size(p)); /* at least major collections + depend on some content of id_copy. + remove after fixing that XXX */ + O->h_tid |= GCFLAG_OLD; + p->h_original = (revision_t)O; p->h_tid |= GCFLAG_HAS_ID; diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -223,11 +223,13 @@ if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - id_copy->h_tid |= GCFLAG_VISITED; + if (!(id_copy->h_tid & GCFLAG_VISITED)) { + id_copy->h_tid |= GCFLAG_VISITED; - /* XXX: may not always need tracing? */ - //if (!(id_copy->h_tid & GCFLAG_STUB)) - // gcptrlist_insert(&objects_to_trace, id_copy); + /* XXX: may not always need tracing? 
*/ + if (!(id_copy->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, id_copy); + } } else { /* prebuilt originals won't get collected anyway @@ -237,6 +239,14 @@ } } +static void visit(gcptr *pobj); + +gcptr stmgcpage_visit(gcptr obj) +{ + visit(&obj); + return obj; +} + static void visit(gcptr *pobj) { gcptr obj = *pobj; @@ -276,10 +286,10 @@ keep_original_alive(prev_obj); assert(*pobj == prev_obj); - gcptr obj1 = obj; - visit(&obj1); /* recursion, but should be only once */ + /* recursion, but should be only once */ + obj = stmgcpage_visit(obj); assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj1) | 2; + prev_obj->h_revision = ((revision_t)obj) | 2; return; } } @@ -452,11 +462,11 @@ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); /* NOT NECESSARILY EMPTY: - list_of_read_objects - private_from_protected - public_to_private - - old_objects_to_trace */ assert(gcptrlist_size(&d->list_of_read_objects) == d->num_read_objects_known_old); @@ -488,8 +498,15 @@ /* If we're aborting this transaction anyway, we don't need to do * more here. */ - if (d->active < 0) - return; /* already "aborted" during forced minor collection */ + if (d->active < 0) { + /* already "aborted" during forced minor collection + clear list of read objects so that a possible minor collection + before the abort doesn't trip + fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; + return; + } if (d->active == 2) { /* inevitable transaction: clear the list of read objects */ @@ -518,6 +535,9 @@ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR); + /* fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; return; } @@ -776,9 +796,13 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); mark_all_stack_roots(); - visit_all_objects(); + do { + visit_all_objects(); + stm_visit_old_weakrefs(); + } while (gcptrlist_size(&objects_to_trace) != 0); gcptrlist_delete(&objects_to_trace); clean_up_lists_of_read_objects_and_fix_outdated_flags(); + stm_clean_old_weakrefs(); mc_total_in_use = mc_total_reserved = 0; free_all_unused_local_pages(); diff --git a/rpython/translator/stm/src_stm/gcpage.h b/rpython/translator/stm/src_stm/gcpage.h --- a/rpython/translator/stm/src_stm/gcpage.h +++ b/rpython/translator/stm/src_stm/gcpage.h @@ -46,7 +46,8 @@ /* These fields are in tx_public_descriptor rather than tx_descriptor. The indirection allows us to keep around the lists of pages even - after the thread finishes, until the next major collection. + after the thread finishes. Such a "zombie" tx_public_descriptor + is reused by the next thread that starts. */ #define GCPAGE_FIELDS_DECL \ /* The array 'pages_for_size' contains GC_SMALL_REQUESTS \ @@ -66,7 +67,10 @@ /* A set of all non-small objects (outside the nursery). \ We could also have a single global set, but this avoids \ locking in stmgcpage_malloc/free. 
*/ \ - struct G2L nonsmall_objects; + struct G2L nonsmall_objects; \ + \ + /* Weakref support */ \ + struct GcPtrList old_weakrefs; #define LOCAL_GCPAGES() (thread_descriptor->public_descriptor) @@ -81,6 +85,7 @@ void stmgcpage_add_prebuilt_root(gcptr obj); void stmgcpage_possibly_major_collect(int force); long stmgcpage_count(int quantity); +gcptr stmgcpage_visit(gcptr); extern struct GcPtrList stm_prebuilt_gcroots; diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -1,8 +1,7 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ #include "stmimpl.h" - -static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj) { return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); } @@ -55,6 +54,7 @@ gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); + gcptrlist_delete(&d->young_weakrefs); } void stmgc_minor_collect_soon(void) @@ -101,6 +101,13 @@ return P; } +gcptr stm_allocate_immutable(size_t size, unsigned long tid) +{ + gcptr P = stm_allocate(size, tid); + P->h_tid |= GCFLAG_IMMUTABLE; + return P; +} + gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); @@ -148,7 +155,7 @@ gcptr fresh_old_copy; struct tx_descriptor *d = thread_descriptor; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* not a nursery object */ } else { @@ -375,7 +382,7 @@ for (i = d->list_of_read_objects.size - 1; i >= limit; --i) { gcptr obj = items[i]; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* non-young or visited young objects are kept */ continue; } @@ -409,6 +416,7 @@ { assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); spinlock_release(d->public_descriptor->collection_lock); @@ -444,6 +452,8 @@ surviving young-but-outside-the-nursery objects have been flagged with GCFLAG_OLD */ + stm_move_young_weakrefs(d); + teardown_minor_collect(d); assert(!stm_has_got_any_lock(d)); @@ -510,6 +520,7 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->list_of_read_objects) >= d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= diff --git a/rpython/translator/stm/src_stm/nursery.h b/rpython/translator/stm/src_stm/nursery.h --- a/rpython/translator/stm/src_stm/nursery.h +++ b/rpython/translator/stm/src_stm/nursery.h @@ -51,7 +51,10 @@ still in the same transaction, to know that the initial \ part of the lists cannot contain young objects any more. 
*/ \ long num_private_from_protected_known_old; \ - long num_read_objects_known_old; + long num_read_objects_known_old; \ + \ + /* Weakref support */ \ + struct GcPtrList young_weakrefs; struct tx_descriptor; /* from et.h */ @@ -65,5 +68,6 @@ size_t stmgc_size(gcptr); void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj); #endif diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -dd0aff1663a1 +4cad3aa5a20b diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -2,11 +2,13 @@ #include "stmimpl.h" -gcptr stm_stub_malloc(struct tx_public_descriptor *pd) +gcptr stm_stub_malloc(struct tx_public_descriptor *pd, size_t minsize) { assert(pd->collection_lock != 0); + if (minsize < sizeof(struct stm_stub_s)) + minsize = sizeof(struct stm_stub_s); - gcptr p = stmgcpage_malloc(sizeof(struct stm_stub_s)); + gcptr p = stmgcpage_malloc(minsize); STUB_THREAD(p) = pd; return p; } @@ -22,9 +24,56 @@ { gcptr stub, obj = *pobj; if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) == - (GCFLAG_PUBLIC | GCFLAG_OLD)) + (GCFLAG_PUBLIC | GCFLAG_OLD)) return; + if (obj->h_tid & GCFLAG_IMMUTABLE) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_NURSERY_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + } + /* we use 'all_stubs', a dictionary, in order to try to avoid duplicate stubs for the same object. XXX maybe it would be better to use a fast approximative cache that stays around for @@ -39,8 +88,20 @@ assert(stub->h_revision == (((revision_t)obj) | 2)); goto done; - not_found: - stub = stm_stub_malloc(sd->foreign_pd); + not_found:; + size_t size = 0; + if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) { + /* There shouldn't be a public, young object without + a h_original. But there can be priv/protected ones. + We have a young protected copy without an h_original + The stub we allocate will be the h_original, but + it must be big enough to be copied over by a major + collection later. 
*/ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + + size = stmgc_size(obj); + } + stub = stm_stub_malloc(sd->foreign_pd, size); stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; @@ -52,10 +113,9 @@ stub->h_original = (revision_t)obj; } else { - /* There shouldn't be a public, young object without - a h_original. But there can be protected ones. */ - assert(!(obj->h_tid & GCFLAG_PUBLIC)); - obj->h_original = (revision_t)stub; + /* this is the big-stub case described above */ + obj->h_original = (revision_t)stub; + stub->h_original = 0; /* stub_malloc does not set to 0... */ if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { ((gcptr)obj->h_revision)->h_original = (revision_t)stub; } diff --git a/rpython/translator/stm/src_stm/steal.h b/rpython/translator/stm/src_stm/steal.h --- a/rpython/translator/stm/src_stm/steal.h +++ b/rpython/translator/stm/src_stm/steal.h @@ -10,7 +10,7 @@ #define STUB_THREAD(h) (((struct stm_stub_s *)(h))->s_thread) -gcptr stm_stub_malloc(struct tx_public_descriptor *); +gcptr stm_stub_malloc(struct tx_public_descriptor *, size_t minsize); void stm_steal_stub(gcptr); gcptr stm_get_stolen_obj(long index); /* debugging */ void stm_normalize_stolen_objects(struct tx_descriptor *); diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -11,5 +11,6 @@ #include "gcpage.c" #include "stmsync.c" #include "extra.c" +#include "weakref.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -30,6 +30,9 @@ /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); +/* allocate an object that is be immutable. it cannot be changed with + a stm_write_barrier() or after the next commit */ +gcptr stm_allocate_immutable(size_t size, unsigned long tid); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); @@ -55,11 +58,19 @@ int stm_enter_callback_call(void); void stm_leave_callback_call(int); -/* read/write barriers (the most general versions only for now) */ -#if 0 // (optimized version below) -gcptr stm_read_barrier(gcptr); -gcptr stm_write_barrier(gcptr); -#endif +/* read/write barriers (the most general versions only for now). + + - the read barrier must be applied before reading from an object. + the result is valid as long as we're in the same transaction, + and stm_write_barrier() is not called on the same object. + + - the write barrier must be applied before writing to an object. + the result is valid for a shorter period of time: we have to + do stm_write_barrier() again if we ended the transaction, or + if we did a potential collection (e.g. stm_allocate()). +*/ +static inline gcptr stm_read_barrier(gcptr); +static inline gcptr stm_write_barrier(gcptr); /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' @@ -115,6 +126,14 @@ void stm_minor_collect(void); void stm_major_collect(void); +/* weakref support: allocate a weakref object, and set it to point + weakly to 'obj'. The weak pointer offset is hard-coded to be at + 'size - WORD'. Important: stmcb_trace() must NOT trace it. + Weakrefs are *immutable*! Don't attempt to use stm_write_barrier() + on them. 
*/ +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); + + /**************** END OF PUBLIC INTERFACE *****************/ /************************************************************/ diff --git a/rpython/translator/stm/src_stm/stmimpl.h b/rpython/translator/stm/src_stm/stmimpl.h --- a/rpython/translator/stm/src_stm/stmimpl.h +++ b/rpython/translator/stm/src_stm/stmimpl.h @@ -37,5 +37,6 @@ #include "steal.h" #include "stmsync.h" #include "extra.h" +#include "weakref.h" #endif From noreply at buildbot.pypy.org Mon Jul 22 17:56:41 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 22 Jul 2013 17:56:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add weakref support and log rewritten jit trace Message-ID: <20130722155641.C0AF11C04B4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65526:ac7d49c563e5 Date: 2013-07-22 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ac7d49c563e5/ Log: add weakref support and log rewritten jit trace diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -397,16 +397,24 @@ if IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra # arg, so we need to substract 2 words + # ||val|retadr| mc.SUB_ri(esp.value, 2 * WORD) + # ||val|retadr|x|x|| mc.MOV_rs(eax.value, 3 * WORD) # 2 + 1 mc.MOV_sr(0, eax.value) + # ||val|retadr|x|val|| else: + # ||val|retadr|| mc.MOV_rs(edi.value, WORD) else: + # ||retadr| # we have one word to align mc.SUB_ri(esp.value, 7 * WORD) # align and reserve some space + # ||retadr|x||x|x||x|x||x|x|| mc.MOV_sr(WORD, eax.value) # save for later + # ||retadr|x||x|x||x|x||rax|x|| mc.MOVSD_sx(3 * WORD, xmm0.value) + # ||retadr|x||x|x||xmm0|x||rax|x|| if IS_X86_32: mc.MOV_sr(4 * WORD, edx.value) mc.MOV_sr(0, ebp.value) @@ -427,19 +435,24 @@ if descr.returns_modified_object: # new addr in eax, save to now unused arg if for_frame: + # ||retadr|x||x|x||xmm0|x||rax|x|| mc.PUSH_r(eax.value) + # ||retadr|x||x|x||xmm0|x||rax|x||result| elif IS_X86_32: mc.MOV_sr(3 * WORD, eax.value) + # ||val|retadr|x|val|| + # -> ||result|retaddr|x|val|| else: mc.MOV_sr(WORD, eax.value) + # ||val|retadr|| -> ||result|retadr|| if withcards: # A final TEST8 before the RET, for the caller. Careful to # not follow this instruction with another one that changes # the status of the CPU flags! 
- assert not is_stm + assert not is_stm and not descr.returns_modified_object if IS_X86_32: - mc.MOV_rs(eax.value, 3*WORD) + mc.MOV_rs(eax.value, 3 * WORD) else: mc.MOV_rs(eax.value, WORD) mc.TEST8(addr_add_const(eax, descr.jit_wb_if_flag_byteofs), @@ -460,14 +473,18 @@ else: if IS_X86_32: mc.MOV_rs(edx.value, 5 * WORD) + # ||retadr|x||x|x||xmm0|x||rax|x||result| mc.MOVSD_xs(xmm0.value, 4 * WORD) mc.MOV_rs(eax.value, 2 * WORD) # restore self._restore_exception(mc, exc0, exc1) mc.MOV(exc0, RawEspLoc(WORD * 6, REF)) mc.MOV(exc1, RawEspLoc(WORD * 7, INT)) - mc.POP_r(eax.value) # return value - + if IS_X86_32: + mc.POP_r(edx.value) # return value + else: + mc.POP_r(edi.value) # return value + mc.LEA_rs(esp.value, 7 * WORD) mc.RET() @@ -478,7 +495,8 @@ else: descr.set_b_slowpath(withcards + 2 * withfloats, rawstart) - def assemble_loop(self, loopname, inputargs, operations, looptoken, log): + def assemble_loop(self, loopname, inputargs, operations, looptoken, log, + logger=None): '''adds the following attributes to looptoken: _ll_function_addr (address of the generated func, as an int) _ll_loop_code (debug: addr of the start of the ResOps) @@ -513,6 +531,9 @@ self._check_frame_depth_debug(self.mc) operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) + if logger: + logger.log_loop(inputargs, operations, -2, "rewritten", + name=loopname) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -554,7 +575,7 @@ size_excluding_failure_stuff - looppos) def assemble_bridge(self, faildescr, inputargs, operations, - original_loop_token, log): + original_loop_token, log, logger=None): if not we_are_translated(): # Arguments should be unique assert len(set(inputargs)) == len(inputargs) @@ -572,6 +593,8 @@ operations, self.current_clt.allgcrefs, self.current_clt.frame_info) + if logger: + logger.log_bridge(inputargs, operations, "rewritten") self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() @@ -2138,10 +2161,21 @@ if not is_frame: mc.PUSH(loc_base) if is_frame and align_stack: + # ||retadr| mc.SUB_ri(esp.value, 16 - WORD) # erase the return address + # ||retadr|...|| func = descr.get_b_slowpath(helper_num) mc.CALL(imm(func)) - mc.MOV_rr(loc_base.value, eax.value) + + # result in eax, except if is_frame + if is_frame: + if IS_X86_32: + mc.MOV_rr(loc_base.value, edx.value) + else: + mc.MOV_rr(loc_base.value, edi.value) + else: + mc.MOV_rr(loc_base.value, eax.value) + if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -90,16 +90,18 @@ lines = machine_code_dump(data, addr, self.backend_name, label_list) print ''.join(lines) - def compile_loop(self, inputargs, operations, looptoken, log=True, name=''): + def compile_loop(self, inputargs, operations, looptoken, log=True, name='', + logger=None): return self.assembler.assemble_loop(name, inputargs, operations, - looptoken, log=log) + looptoken, log=log, logger=logger) def compile_bridge(self, faildescr, inputargs, operations, - original_loop_token, log=True): + original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() return self.assembler.assemble_bridge(faildescr, inputargs, operations, - 
original_loop_token, log=log) + original_loop_token, log=log, + logger=logger) def clear_latest_values(self, count): setitem = self.assembler.fail_boxes_ptr.setitem diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -303,14 +303,17 @@ metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', name=name) return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, - log=log, name=name) + log=log, name=name, + logger=metainterp_sd.logger_ops) def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True): metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") assert isinstance(faildescr, AbstractFailDescr) return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, - original_loop_token, log=log) + original_loop_token, log=log, + logger=metainterp_sd.logger_ops) + def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): vinfo = jitdriver_sd.virtualizable_info diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -39,6 +39,10 @@ debug_start("jit-log-compiling-bridge") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-compiling-bridge") + elif extra == "rewritten": + debug_start("jit-log-rewritten-bridge") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-bridge") else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -104,7 +104,7 @@ # XXX finalizers are ignored for now #ll_assert(not needs_finalizer, 'XXX needs_finalizer') #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') - ll_assert(not contains_weakptr, 'XXX contains_weakptr') + #ll_assert(not contains_weakptr, 'XXX contains_weakptr') # XXX call optimized versions, e.g. 
if size < GC_NURSERY_SECTION return llop.stm_allocate(llmemory.GCREF, size, typeid16) @@ -117,6 +117,9 @@ (obj + offset_to_length).signed[0] = length return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_weakref(self, typeid16, size, obj): + return llop.stm_weakref_allocate(llmemory.GCREF, size, + typeid16, obj) def can_move(self, obj): """Means the reference will stay valid, except if not diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -1,8 +1,10 @@ from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.memory.gctransform.framework import ( +from rpython.memory.gctransform.framework import ( TYPE_ID, BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) +from rpython.memory.gctypelayout import WEAKREF, WEAKREFPTR +from rpython.rtyper import rmodel class StmFrameworkGCTransformer(BaseFrameworkGCTransformer): @@ -12,6 +14,13 @@ s_gc, s_typeid16) gc = self.gcdata.gc # + s_gcref = annmodel.SomePtr(llmemory.GCREF) + + self.malloc_weakref_ptr = self._getfn( + GCClass.malloc_weakref.im_func, + [s_gc, s_typeid16, annmodel.SomeInteger(nonneg=True), + s_gcref], s_gcref) + # def pypy_stmcb_size(obj): return gc.get_size(obj) pypy_stmcb_size.c_name = "pypy_stmcb_size" @@ -49,6 +58,40 @@ def gct_gc_adr_of_root_stack_top(self, hop): hop.genop("stm_get_root_stack_top", [], resultvar=hop.spaceop.result) + def gct_weakref_create(self, hop): + op = hop.spaceop + + type_id = self.get_type_id(WEAKREF) + + c_type_id = rmodel.inputconst(TYPE_ID, type_id) + info = self.layoutbuilder.get_info(type_id) + c_size = rmodel.inputconst(lltype.Signed, info.fixedsize) + malloc_ptr = self.malloc_weakref_ptr + c_null = rmodel.inputconst(llmemory.Address, llmemory.NULL) + args = [self.c_const_gc, c_type_id, c_size, c_null] + # XXX: for now, set weakptr ourselves and simply pass NULL + + # push and pop the current live variables *including* the argument + # to the weakref_create operation, which must be kept alive and + # moved if the GC needs to collect + livevars = self.push_roots(hop, keep_current_args=True) + v_result = hop.genop("direct_call", [malloc_ptr] + args, + resulttype=llmemory.GCREF) + v_result = hop.genop("cast_opaque_ptr", [v_result], + resulttype=WEAKREFPTR) + self.pop_roots(hop, livevars) + # cast_ptr_to_adr must be done after malloc, as the GC pointer + # might have moved just now. 
+ v_instance, = op.args + v_addr = hop.genop("cast_ptr_to_adr", [v_instance], + resulttype=llmemory.Address) + hop.genop("bare_setfield", + [v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr]) + v_weakref = hop.genop("cast_ptr_to_weakrefptr", [v_result], + resulttype=llmemory.WeakRefPtr) + hop.cast_result(v_weakref) + + def _gct_with_roots_pushed(self, hop): livevars = self.push_roots(hop) self.default(hop) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -445,6 +445,8 @@ 'stm_leave_callback_call':LLOp(), 'stm_abort_and_retry': LLOp(), + 'stm_weakref_allocate': LLOp(sideeffects=False, canmallocgc=True), + 'stm_threadlocalref_get': LLOp(sideeffects=False), 'stm_threadlocalref_set': LLOp(), 'stm_threadlocal_get': LLOp(sideeffects=False), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -593,6 +593,7 @@ OP_STM_POP_ROOT_INTO = _OP_STM OP_STM_GET_ROOT_STACK_TOP = _OP_STM OP_STM_ALLOCATE = _OP_STM + OP_STM_WEAKREF_ALLOCATE = _OP_STM OP_STM_GET_TID = _OP_STM OP_STM_HASH = _OP_STM OP_STM_ID = _OP_STM diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -99,6 +99,14 @@ return '%s = (%s)stm_shadowstack;' % ( result, cdecl(funcgen.lltypename(op.result), '')) +def stm_weakref_allocate(funcgen, op): + arg0 = funcgen.expr(op.args[0]) + arg1 = funcgen.expr(op.args[1]) + arg2 = funcgen.expr(op.args[2]) + result = funcgen.expr(op.result) + return '%s = stm_weakref_allocate(%s, %s, %s);' % (result, arg0, + arg1, arg2) + def stm_allocate(funcgen, op): arg0 = funcgen.expr(op.args[0]) arg1 = funcgen.expr(op.args[1]) From noreply at buildbot.pypy.org Mon Jul 22 18:47:40 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 22 Jul 2013 18:47:40 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: bah took forever to fix - make sure gcmap and push_all_regs is consistent Message-ID: <20130722164740.6371D1C016D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65527:b35486184116 Date: 2013-07-22 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/b35486184116/ Log: bah took forever to fix - make sure gcmap and push_all_regs is consistent diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -155,9 +155,6 @@ """ mc = codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) - gcrootmap = self.cpu.gc_ll_descr.gcrootmap - if gcrootmap and gcrootmap.is_shadow_stack: - self._call_header_shadowstack(mc, gcrootmap, selected_reg=r8) mc.SUB(esp, imm(WORD)) self.set_extra_stack_depth(mc, 2 * WORD) # args are in their respective positions @@ -165,8 +162,6 @@ mc.ADD(esp, imm(WORD)) self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) - if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(mc, gcrootmap, selected_reg=r8) self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) mc.RET() @@ -1745,7 +1740,8 @@ regs = gpr_reg_mgr_cls.all_regs for i, gpr in enumerate(regs): if gpr not in ignored_regs: - mc.MOV_br(i * WORD + base_ofs, gpr.value) + v = 
gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_br(v * WORD + base_ofs, gpr.value) if withfloats: if IS_X86_64: coeff = 1 @@ -1766,7 +1762,8 @@ regs = gpr_reg_mgr_cls.all_regs for i, gpr in enumerate(regs): if gpr not in ignored_regs: - mc.MOV_rb(gpr.value, i * WORD + base_ofs) + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_rb(gpr.value, v * WORD + base_ofs) if withfloats: # Pop all XMM regs if IS_X86_64: From noreply at buildbot.pypy.org Tue Jul 23 03:26:01 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:26:01 +0200 (CEST) Subject: [pypy-commit] pypy default: convert to formal app-level/interp level tests, for py3k Message-ID: <20130723012601.2CD861C073E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65528:8ebd8a627364 Date: 2013-07-22 18:12 -0700 http://bitbucket.org/pypy/pypy/changeset/8ebd8a627364/ Log: convert to formal app-level/interp level tests, for py3k diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,227 +1,226 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -from __future__ import absolute_import -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. - if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." 
- + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') + + # Load both with same prefix. + prefix1 = 2**10 * 'a' + + # The host md5 + m1 = md5.md5() + m1.update(prefix1) + m1c = m1.copy() + + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') + + # Update and compare... + for i in range(len(cases)): + message = cases[i][0] + + m1c.update(message) + d1 = m1c.hexdigest() + + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) + + assert d1 == d2 + + +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + def test1(self): - "Test cases with known digest result." 
- + """Test cases with known digest result.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message, expectedResult = cases[i][0], None if len(cases[i]) == 2: expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) + self.print_diff(message, d1, d2, expectedResult) assert res is None + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. + ) - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - def test4(self): - "Test cases with increasingly growing message lengths." - + """Test cases with increasingly growing message lengths.""" i = 0 - while i < 2**5: + while i < 2**5: message = i * 'a' - res = compareImp(message) + res = self.compare(message) if res is not None: d1, d2 = res - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - i = i + 1 + i += 1 - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... - for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 From noreply at buildbot.pypy.org Tue Jul 23 03:26:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:26:02 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid the explicit resource import for the sake of the py3k branch (which SyntaxErrors) Message-ID: <20130723012602.7E7CD1C07B6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65529:07b497ef4cb7 Date: 2013-07-22 18:16 -0700 http://bitbucket.org/pypy/pypy/changeset/07b497ef4cb7/ Log: avoid the explicit resource import for the sake of the py3k branch (which SyntaxErrors) diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d From noreply at buildbot.pypy.org Tue Jul 23 03:26:03 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:26:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130723012603.D00901C1007@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65530:2398c0569891 Date: 2013-07-22 18:17 -0700 
http://bitbucket.org/pypy/pypy/changeset/2398c0569891/ Log: merge default diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -7,7 +7,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,210 +1,71 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = list(str) - d = map(ord, d) - d = ["%02x" % x for x in d] - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print("Message: '%s'" % message) - print("Message length: %d" % len(message)) - if expectedResult: - print("%-48s (expected)" % format(expectedResult)) - print("%-48s (Std. lib. MD5)" % formatHex(d1)) - print("%-48s (Pure Python MD5)" % formatHex(d2)) - print() - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. - if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." - - def test1(self): - "Test cases with known digest result." 
- + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') - for i in range(len(cases)): - res = compareImp(cases[i][0]) - if res is not None: - d1, d2 = res - message, expectedResult = cases[i][0], None - if len(cases[i]) == 2: - expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) - assert res is None - - - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in range(len(cases)): - res = compareImp(cases[i][0]) - if res is not None: - d1, d2 = res - message = cases[i][0] - printDiff(message, d1, d2) - assert res is None - - - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. - ) - - for i in range(len(cases)): - res = compareImp(cases[i][0]) - if res is not None: - d1, d2 = res - message = cases[i][0] - printDiff(message, d1, d2) - assert res is None - - - def test4(self): - "Test cases with increasingly growing message lengths." - - i = 0 - while i < 2**5: - message = i * 'a' - res = compareImp(message) - if res is not None: - d1, d2 = res - printDiff(message, d1, d2) - assert res is None - i = i + 1 - - - def test5(self): - "Test updating cloned objects." 
- - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. + # Load both with same prefix. prefix1 = 2**10 * 'a' + # The host md5 m1 = md5.md5() m1.update(prefix1) m1c = m1.copy() - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') # Update and compare... for i in range(len(cases)): @@ -213,14 +74,153 @@ m1c.update(message) d1 = m1c.hexdigest() - m2c.update(message) - d2 = m2c.hexdigest() + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) assert d1 == d2 -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. 
MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + + def test1(self): + """Test cases with known digest result.""" + cases = ( + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) + + for i in range(len(cases)): + res = self.compare(cases[i][0]) + if res is not None: + d1, d2 = res + message, expectedResult = cases[i][0], None + if len(cases[i]) == 2: + expectedResult = cases[i][1] + self.print_diff(message, d1, d2, expectedResult) + assert res is None + + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + + for i in range(len(cases)): + res = self.compare(cases[i][0]) + if res is not None: + d1, d2 = res + message = cases[i][0] + self.print_diff(message, d1, d2) + assert res is None + + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
+ ) + + for i in range(len(cases)): + res = self.compare(cases[i][0]) + if res is not None: + d1, d2 = res + message = cases[i][0] + self.print_diff(message, d1, d2) + assert res is None + + def test4(self): + """Test cases with increasingly growing message lengths.""" + i = 0 + while i < 2**5: + message = i * 'a' + res = self.compare(message) + if res is not None: + d1, d2 = res + self.print_diff(message, d1, d2) + assert res is None + i += 1 + + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 From noreply at buildbot.pypy.org Tue Jul 23 03:26:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:26:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20130723012605.17EDB1C303C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65531:213bae1fcc89 Date: 2013-07-22 18:23 -0700 http://bitbucket.org/pypy/pypy/changeset/213bae1fcc89/ Log: adapt to py3 diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -27,6 +27,12 @@ spaceconfig = dict(usemodules=('struct',)) + def setup_class(cls): + if cls.runappdirect: + # XXX: + import py + py.test.skip('Unavailable under py3 runappdirect') + def test_update(self): """Test updating cloned objects.""" cases = ( @@ -63,7 +69,7 @@ m1c = m1.copy() # The app-level _md5 - w_m2 = space.call_method(w__md5, 'new') + w_m2 = space.call_method(w__md5, 'md5') space.call_method(w_m2, 'update', space.wrap(prefix1)) w_m2c = space.call_method(w_m2, 'copy') @@ -91,8 +97,9 @@ space = cls.space cls.w__md5 = import_lib_pypy(space, '_md5') if cls.runappdirect: - # interp2app doesn't work in appdirect mode - cls.w_compare_host = staticmethod(compare_host) + # XXX: + import py + py.test.skip('Unavailable under py3 runappdirect') else: compare_host.unwrap_spec = [str, str, str] cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) @@ -100,7 +107,7 @@ def w_compare(self, message): # Generate results against the app-level pure Python MD5 and # pass them off for comparison against the host Python's MD5 - m2 = self._md5.new() + m2 = self._md5.md5() m2.update(message) return self.compare_host(message, m2.digest(), m2.hexdigest()) @@ -220,7 +227,6 @@ def test_attributes(self): _md5 = self._md5 - assert _md5.digest_size == 16 - assert _md5.new().digest_size == 16 - assert _md5.new().digestsize == 16 - assert _md5.new().block_size == 64 + assert _md5.md5().digest_size == 16 + assert _md5.md5().digestsize == 16 + assert _md5.md5().block_size == 64 From noreply at buildbot.pypy.org Tue Jul 23 03:26:06 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:26:06 +0200 (CEST) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20130723012606.3E9801C303D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65532:bed1ea0ec233 Date: 2013-07-22 18:25 -0700 http://bitbucket.org/pypy/pypy/changeset/bed1ea0ec233/ Log: merge upstream diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - 
py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,227 +1,226 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -from __future__ import absolute_import -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. - if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." - + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') + + # Load both with same prefix. 
+ prefix1 = 2**10 * 'a' + + # The host md5 + m1 = md5.md5() + m1.update(prefix1) + m1c = m1.copy() + + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') + + # Update and compare... + for i in range(len(cases)): + message = cases[i][0] + + m1c.update(message) + d1 = m1c.hexdigest() + + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) + + assert d1 == d2 + + +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + def test1(self): - "Test cases with known digest result." 
- + """Test cases with known digest result.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message, expectedResult = cases[i][0], None if len(cases[i]) == 2: expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) + self.print_diff(message, d1, d2, expectedResult) assert res is None + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. + ) - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - def test4(self): - "Test cases with increasingly growing message lengths." - + """Test cases with increasingly growing message lengths.""" i = 0 - while i < 2**5: + while i < 2**5: message = i * 'a' - res = compareImp(message) + res = self.compare(message) if res is not None: d1, d2 = res - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - i = i + 1 + i += 1 - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... - for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 From noreply at buildbot.pypy.org Tue Jul 23 03:48:24 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 23 Jul 2013 03:48:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: properly escape any path Message-ID: <20130723014824.3B09E1C016D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65533:41898d5a38dd Date: 2013-07-22 18:47 -0700 http://bitbucket.org/pypy/pypy/changeset/41898d5a38dd/ Log: properly escape any path diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -72,7 +72,7 @@ helpers = r"""# -*- encoding: utf-8 -*- if 1: import sys - sys.path.append('%s') + sys.path.append(%r) %s def skip(message): print(message) From noreply at buildbot.pypy.org Tue Jul 23 05:59:03 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 23 Jul 2013 05:59:03 +0200 (CEST) Subject: [pypy-commit] pypy default: bextr operation cannot produce GC pointers Message-ID: <20130723035903.3BDCB1C04B4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65534:6345a967f4df Date: 2013-07-22 20:57 -0700 http://bitbucket.org/pypy/pypy/changeset/6345a967f4df/ Log: bextr operation cannot produce GC pointers diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -478,7 +478,7 @@ 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 
'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', - 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', + 'cvt', 'ucomi', 'comi', 'subs', 'subp', 'adds', 'addp', 'xorp', 'movap', 'movd', 'movlp', 'movup', 'sqrt', 'rsqrt', 'movhlp', 'movlhp', 'mins', 'minp', 'maxs', 'maxp', 'unpck', 'pxor', 'por', # sse2 'shufps', 'shufpd', @@ -495,13 +495,15 @@ # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers - 'movz', + 'movz', # locked operations should not move GC pointers, at least so far 'lock', 'pause', # non-temporal moves should be reserved for areas containing # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', - ]) + # bit manipulations + 'bextr', + ]) # a partial list is hopefully good enough for now; it's all to support # only one corner case, tested in elf64/track_zero.s @@ -741,7 +743,7 @@ # tail-calls are equivalent to RET for us return InsnRet(self.CALLEE_SAVE_REGISTERS) return InsnStop("jump") - + def register_jump_to(self, label, lastinsn=None): if lastinsn is None: lastinsn = self.insns[-1] @@ -1020,7 +1022,7 @@ visit_movl = visit_mov visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) - + visit_pushq = FunctionGcRootTracker._visit_push visit_addq = FunctionGcRootTracker._visit_add From noreply at buildbot.pypy.org Tue Jul 23 05:59:04 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 23 Jul 2013 05:59:04 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130723035904.7232F1C04B4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65535:42cc3499acc4 Date: 2013-07-22 20:58 -0700 http://bitbucket.org/pypy/pypy/changeset/42cc3499acc4/ Log: merged upstream diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. 
_`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,227 +1,226 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -from __future__ import absolute_import -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. - if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." 
- + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') + + # Load both with same prefix. + prefix1 = 2**10 * 'a' + + # The host md5 + m1 = md5.md5() + m1.update(prefix1) + m1c = m1.copy() + + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') + + # Update and compare... + for i in range(len(cases)): + message = cases[i][0] + + m1c.update(message) + d1 = m1c.hexdigest() + + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) + + assert d1 == d2 + + +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + def test1(self): - "Test cases with known digest result." 
- + """Test cases with known digest result.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message, expectedResult = cases[i][0], None if len(cases[i]) == 2: expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) + self.print_diff(message, d1, d2, expectedResult) assert res is None + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. + ) - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - def test4(self): - "Test cases with increasingly growing message lengths." - + """Test cases with increasingly growing message lengths.""" i = 0 - while i < 2**5: + while i < 2**5: message = i * 'a' - res = compareImp(message) + res = self.compare(message) if res is not None: d1, d2 = res - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - i = i + 1 + i += 1 - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... - for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 From noreply at buildbot.pypy.org Tue Jul 23 08:54:19 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 23 Jul 2013 08:54:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix logparser and viewcode.py for new thread number prefix in logs Message-ID: <20130723065419.E3F5B1C04B4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65536:3879aa784b4c Date: 2013-07-23 08:31 +0200 http://bitbucket.org/pypy/pypy/changeset/3879aa784b4c/ Log: fix logparser and viewcode.py for new thread number prefix in logs diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -239,6 +239,7 @@ def parse(self, f, textonly=True): for line in f: + line = line[line.find('#') + 1:].strip() if line.startswith('BACKEND '): self.backend_name = line.split(' ')[1].strip() elif line.startswith('CODE_DUMP '): diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -26,8 +26,11 @@ def parse_log(lines, verbose=False): color = "(?:\x1b.*?m)?" 
- r_start = re.compile(color + r"\[([0-9a-fA-F]+)\] \{([\w-]+)" + color + "$") - r_stop = re.compile(color + r"\[([0-9a-fA-F]+)\] ([\w-]+)\}" + color + "$") + thread = "\d+#\s" + r_start = re.compile(color + thread + + r"\[([0-9a-fA-F]+)\] \{([\w-]+)" + color + "$") + r_stop = re.compile(color + thread + + r"\[([0-9a-fA-F]+)\] ([\w-]+)\}" + color + "$") lasttime = 0 log = DebugLog() time_decrase = False From noreply at buildbot.pypy.org Tue Jul 23 08:54:21 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 23 Jul 2013 08:54:21 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix more tools for new thread number prefix in logs Message-ID: <20130723065421.2E6571C04B4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65537:3e6fe880134a Date: 2013-07-23 08:53 +0200 http://bitbucket.org/pypy/pypy/changeset/3e6fe880134a/ Log: fix more tools for new thread number prefix in logs diff --git a/rpython/jit/tool/loopcounter.py b/rpython/jit/tool/loopcounter.py --- a/rpython/jit/tool/loopcounter.py +++ b/rpython/jit/tool/loopcounter.py @@ -17,11 +17,13 @@ time0 = None lines = iter(log) for line in lines: + line = line[line.find("#") + 1:].strip() if time0 is None and line.startswith('['): time0 = get_timestamp(line) if '{jit-mem-looptoken-' in line: time_now = get_timestamp(line) - time0 text = lines.next() + text = text[text.find("#") + 1:].strip() if text.startswith('allocating Loop #'): loops += 1 elif text.startswith('allocating Bridge #'): diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -309,6 +309,7 @@ newlines = [] first_comment = None for line in lines: + line = line[line.find('#')+1:].strip() # for simplicity comments are not allowed on # debug_merge_point lines if '#' in line and 'debug_merge_point(' not in line: diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -187,7 +187,7 @@ return real_loops, allloops bar.render((i * 100) / len(loops)) firstline = loop[:loop.find("\n")] - m = re.match('# Loop (\d+)', firstline) + m = re.match('\d+# # Loop (\d+)', firstline) if m: no = int(m.group(1)) assert len(real_loops) == no From noreply at buildbot.pypy.org Tue Jul 23 10:03:12 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 23 Jul 2013 10:03:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: possibly fix things by not using registers that aren't saved Message-ID: <20130723080312.C145E1C305D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65538:c292736702e8 Date: 2013-07-23 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/c292736702e8/ Log: possibly fix things by not using registers that aren't saved diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -347,18 +347,19 @@ mc.CALL(imm(func)) # eax has result if IS_X86_32: - mc.ADD_ri(esp.value, 5 * WORD) + # ||val2|val1|retaddr|x||x|x|val2|val1| + mc.MOV_sr(7 * WORD, eax.value) + # ||result|val1|retaddr|x||x|x|val2|val1| else: - mc.ADD_ri(esp.value, WORD) - # - # result in eax, save (not sure if necessary) - mc.PUSH_r(eax.value) + # ||val2|val1||retaddr|x|| + mc.MOV_sr(3 * WORD, eax.value) + # ||result|val1||retaddr|x|| # self._pop_all_regs_from_frame(mc, [], withfloats=False, callee_only=True) # - mc.POP_r(eax.value) - mc.RET16_i(2 * WORD) + # only 
remove one arg: + mc.RET16_i(1 * WORD) rawstart = mc.materialize(self.cpu.asmmemmgr, []) self.ptr_eq_slowpath = rawstart @@ -465,11 +466,10 @@ self._pop_all_regs_from_frame(mc, [], withfloats, callee_only=True) if descr.returns_modified_object: - if IS_X86_32: - mc.MOV_rs(eax.value, 3 * WORD) - else: - mc.MOV_rs(eax.value, WORD) - mc.RET16_i(WORD) + # preserve argument which now holds the result + mc.RET() + else: + mc.RET16_i(WORD) else: if IS_X86_32: mc.MOV_rs(edx.value, 5 * WORD) @@ -481,7 +481,7 @@ mc.MOV(exc1, RawEspLoc(WORD * 7, INT)) if IS_X86_32: - mc.POP_r(edx.value) # return value + mc.POP_r(ecx.value) # return value else: mc.POP_r(edi.value) # return value @@ -2138,8 +2138,10 @@ mc.PUSH(a_base) func = self.ptr_eq_slowpath mc.CALL(imm(func)) + # result still on stack assert isinstance(result_loc, RegLoc) - mc.MOV_rr(result_loc.value, eax.value) + mc.POP_r(result_loc.value) + def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): @@ -2166,15 +2168,17 @@ # ||retadr|...|| func = descr.get_b_slowpath(helper_num) mc.CALL(imm(func)) - - # result in eax, except if is_frame + # get result: if is_frame: + # result in register: if IS_X86_32: - mc.MOV_rr(loc_base.value, edx.value) + mc.MOV_rr(loc_base.value, ecx.value) else: mc.MOV_rr(loc_base.value, edi.value) else: - mc.MOV_rr(loc_base.value, eax.value) + # result where argument was: + mc.POP_r(loc_base.value) + if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address From noreply at buildbot.pypy.org Tue Jul 23 11:14:44 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 23 Jul 2013 11:14:44 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130723091444.0C6511C14BB@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65540:dcefe6f331a5 Date: 2013-07-22 19:49 +0200 http://bitbucket.org/pypy/pypy/changeset/dcefe6f331a5/ Log: hg merge default diff too long, truncating to 2000 out of 12120 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di 
Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. 
-""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -46,16 +47,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." - return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +66,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -75,14 +76,27 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,18 +143,22 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit, e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -8,6 +8,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,32 +24,13 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group: + __metaclass__ = _structseq.structseqtype - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... + gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -71,8 +53,8 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(res.contents.gr_name, res.contents.gr_passwd, - res.contents.gr_gid, mem) + return struct_group((res.contents.gr_name, res.contents.gr_passwd, + res.contents.gr_gid, mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. 
+import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -1,3 +1,4 @@ +# this cffi version was rewritten based on the # ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides an interface to the Unix syslog library routines. @@ -9,34 +10,84 @@ if sys.platform == 'win32': raise ImportError("No syslog on Windows") -# load the platform-specific cache made by running syslog.ctc.py -from ctypes_config_cache._syslog_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes import c_int, c_char_p +from cffi import FFI try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +ffi = FFI() -# Real prototype is: -# void syslog(int priority, const char *format, ...); -# But we also need format ("%s") and one format argument (message) -_syslog = libc.syslog -_syslog.argtypes = (c_int, c_char_p, c_char_p) -_syslog.restype = None +ffi.cdef(""" +/* mandatory constants */ +#define LOG_EMERG ... +#define LOG_ALERT ... +#define LOG_CRIT ... +#define LOG_ERR ... +#define LOG_WARNING ... +#define LOG_NOTICE ... +#define LOG_INFO ... +#define LOG_DEBUG ... -_openlog = libc.openlog -_openlog.argtypes = (c_char_p, c_int, c_int) -_openlog.restype = None +#define LOG_PID ... +#define LOG_CONS ... +#define LOG_NDELAY ... -_closelog = libc.closelog -_closelog.argtypes = None -_closelog.restype = None +#define LOG_KERN ... +#define LOG_USER ... +#define LOG_MAIL ... +#define LOG_DAEMON ... +#define LOG_AUTH ... +#define LOG_LPR ... +#define LOG_LOCAL0 ... +#define LOG_LOCAL1 ... +#define LOG_LOCAL2 ... +#define LOG_LOCAL3 ... +#define LOG_LOCAL4 ... +#define LOG_LOCAL5 ... +#define LOG_LOCAL6 ... +#define LOG_LOCAL7 ... -_setlogmask = libc.setlogmask -_setlogmask.argtypes = (c_int,) -_setlogmask.restype = c_int +/* optional constants, gets defined to -919919 if missing */ +#define LOG_NOWAIT ... +#define LOG_PERROR ... + +/* aliased constants, gets defined as some other constant if missing */ +#define LOG_SYSLOG ... +#define LOG_CRON ... +#define LOG_UUCP ... +#define LOG_NEWS ... + +/* functions */ +void openlog(const char *ident, int option, int facility); +void syslog(int priority, const char *format, const char *string); +// NB. 
the signature of syslog() is specialized to the only case we use +void closelog(void); +int setlogmask(int mask); +""") + +lib = ffi.verify(""" +#include + +#ifndef LOG_NOWAIT +#define LOG_NOWAIT -919919 +#endif +#ifndef LOG_PERROR +#define LOG_PERROR -919919 +#endif +#ifndef LOG_SYSLOG +#define LOG_SYSLOG LOG_DAEMON +#endif +#ifndef LOG_CRON +#define LOG_CRON LOG_DAEMON +#endif +#ifndef LOG_UUCP +#define LOG_UUCP LOG_MAIL +#endif +#ifndef LOG_NEWS +#define LOG_NEWS LOG_MAIL +#endif +""") + _S_log_open = False _S_ident_o = None @@ -52,12 +103,17 @@ return None @builtinify -def openlog(ident=None, logoption=0, facility=LOG_USER): +def openlog(ident=None, logoption=0, facility=lib.LOG_USER): global _S_ident_o, _S_log_open if ident is None: ident = _get_argv() - _S_ident_o = c_char_p(ident) # keepalive - _openlog(_S_ident_o, logoption, facility) + if ident is None: + _S_ident_o = ffi.NULL + elif isinstance(ident, str): + _S_ident_o = ffi.new("char[]", ident) # keepalive + else: + raise TypeError("'ident' must be a string or None") + lib.openlog(_S_ident_o, logoption, facility) _S_log_open = True @builtinify @@ -69,19 +125,19 @@ # if log is not opened, open it now if not _S_log_open: openlog() - _syslog(priority, "%s", message) + lib.syslog(priority, "%s", message) @builtinify def closelog(): global _S_log_open, S_ident_o if _S_log_open: - _closelog() + lib.closelog() _S_log_open = False _S_ident_o = None @builtinify def setlogmask(mask): - return _setlogmask(mask) + return lib.setlogmask(mask) @builtinify def LOG_MASK(pri): @@ -91,8 +147,15 @@ def LOG_UPTO(pri): return (1 << (pri + 1)) - 1 -__all__ = ALL_CONSTANTS + ( +__all__ = [] + +for name in sorted(lib.__dict__): + if name.startswith('LOG_'): + value = getattr(lib, name) + if value != -919919: + globals()[name] = value + __all__.append(name) + +__all__ = tuple(__all__) + ( 'openlog', 'syslog', 'closelog', 'setlogmask', 'LOG_MASK', 'LOG_UPTO') - -del ALL_CONSTANTS diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() @@ -144,7 +144,7 @@ requires=module_dependencies.get(modname, []), suggests=module_suggests.get(modname, []), negation=modname not in essential_modules, - validator=get_module_validator(modname)) + ) #validator=get_module_validator(modname)) for modname in all_modules]), BoolOption("allworkingmodules", "use as many working modules as possible", diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. 
-Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. _`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). 
+ +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. _`pytest`: http://pytest.org/ diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,72 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture + +* Stacklet support on ARM + +* Interpreter improvements + +* Various numpy improvements + +* Bugfixes to cffi and ctypes + +* Bugfixes to the stacklet support + +* Improved logging performance + +* Faster sets for objects + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. 
Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.1.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -5,6 +5,9 @@ .. this is a revision shortly after release-2.0 .. startrev: a13c07067613 +.. branch: ndarray-ptp +put and array.put + .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) @@ -61,3 +64,20 @@ .. branch: faster-str-of-bigint Improve performance of str(long). + +.. branch: ndarray-view +Add view to ndarray and zeroD arrays, not on dtype scalars yet + +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + +.. branch: identity-set +Faster sets for objects + +.. branch: inline-identityhash +Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,62 +2,32 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: remove-array-smm -Remove multimethods in the arraymodule +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. -.. branch: remove-set-smm -Remove multi-methods on sets +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. -.. branch: numpy-subarrays -Implement subarrays for numpy +.. 
branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. -.. branch: remove-dict-smm -Remove multi-methods on dict +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). +.. branch: ssl_moving_write_buffer diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" - assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" 
+ """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + 
called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise From noreply at buildbot.pypy.org Tue Jul 23 11:14:45 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 23 Jul 2013 11:14:45 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Reintroduce shortcut in unicode's __new__ method that was removed for some reason. Message-ID: <20130723091445.404491C14BB@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65541:164d7408afeb Date: 2013-07-22 20:18 +0200 http://bitbucket.org/pypy/pypy/changeset/164d7408afeb/ Log: Reintroduce shortcut in unicode's __new__ method that was removed for some reason. 
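        (The shortcut reintroduced here is the usual fast path for immutable
        types: when ``__new__`` would only copy an object whose type is exactly
        the built-in type and no encoding/errors were given, the existing object
        can be returned unchanged.  A rough app-level sketch of the same idea --
        illustrative only, using a hypothetical ``FastUnicode`` subclass as a
        stand-in, not the interpreter-level code in the diff below::

            class FastUnicode(unicode):
                def __new__(cls, obj=u''):
                    # fast path: reusing an immutable object of exactly this
                    # type is safe, so skip building a fresh copy
                    if cls is FastUnicode and type(obj) is FastUnicode:
                        return obj
                    return unicode.__new__(cls, obj)

            u1 = FastUnicode(u'hello')
            assert FastUnicode(u1) is u1            # shortcut hit: same object reused
            assert FastUnicode(u'hello') is not u1  # normal path still builds a new one
        )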
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -358,12 +358,15 @@ encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) # convoluted logic for the case when unicode subclass has a __unicode__ # method, we need to call this method - if (space.is_w(space.type(w_obj), space.w_unicode) or + is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) + if (is_precisely_unicode or (space.isinstance_w(w_obj, space.w_unicode) and space.findattr(w_obj, space.wrap('__unicode__')) is None)): if encoding is not None or errors is not None: raise OperationError(space.w_TypeError, space.wrap('decoding Unicode is not supported')) + if is_precisely_unicode and space.is_w(w_unicodetype, space.w_unicode): + return w_obj w_value = w_obj else: if encoding is None and errors is None: From noreply at buildbot.pypy.org Tue Jul 23 11:14:46 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 23 Jul 2013 11:14:46 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add space argument to _val() and use it throughout in stringmethods.py. Message-ID: <20130723091446.792741C14BB@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65542:ece5238a6d32 Date: 2013-07-22 21:27 +0200 http://bitbucket.org/pypy/pypy/changeset/ece5238a6d32/ Log: Add space argument to _val() and use it throughout in stringmethods.py. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -35,7 +35,7 @@ def _len(self): return len(self.data) - def _val(self): + def _val(self, space): return self.data def _op_val(self, w_other): diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -75,7 +75,7 @@ def _len(self): return len(self._value) - def _val(self): + def _val(self, space): return self._value def _op_val(self, space, w_other): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -18,7 +18,10 @@ def _len(self): raise NotImplementedError - def _val(self): + def _val(self, space): + raise NotImplementedError + + def _op_val(self, space, w_other): raise NotImplementedError def _sliced(self, space, s, start, stop, orig_obj): @@ -30,7 +33,7 @@ @specialize.arg(4) def _convert_idx_params(self, space, w_start, w_end, upper_bound=False): - value = self._val() + value = self._val(space) lenself = len(value) start, end = slicetype.unwrap_start_stop( space, lenself, w_start, w_end, upper_bound=upper_bound) @@ -38,7 +41,7 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val() == self._op_val(space, w_other)) + return space.newbool(self._val(space) == self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -53,7 +56,7 @@ def descr_ne(self, space, w_other): try: - return space.newbool(self._val() != self._op_val(space, w_other)) + return space.newbool(self._val(space) != self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -68,28 +71,28 @@ def descr_lt(self, space, w_other): try: - return space.newbool(self._val() < self._op_val(space, w_other)) + 
return space.newbool(self._val(space) < self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented def descr_le(self, space, w_other): try: - return space.newbool(self._val() <= self._op_val(space, w_other)) + return space.newbool(self._val(space) <= self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented def descr_gt(self, space, w_other): try: - return space.newbool(self._val() > self._op_val(space, w_other)) + return space.newbool(self._val(space) > self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented def descr_ge(self, space, w_other): try: - return space.newbool(self._val() >= self._op_val(space, w_other)) + return space.newbool(self._val(space) >= self._op_val(space, w_other)) except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -101,10 +104,10 @@ # pass def descr_contains(self, space, w_sub): - return space.newbool(self._val().find(self._op_val(space, w_sub)) >= 0) + return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): - return self._new(self._val() + self._op_val(space, w_other)) + return self._new(self._val(space) + self._op_val(space, w_other)) def descr_mul(self, space, w_times): try: @@ -116,12 +119,12 @@ if times <= 0: return self.EMPTY if self._len() == 1: - return self._new(self._val()[0] * times) - return self._new(self._val() * times) + return self._new(self._val(space)[0] * times) + return self._new(self._val(space) * times) def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - selfvalue = self._value + selfvalue = self._val(space) length = len(selfvalue) start, stop, step, sl = w_index.indices4(space, length) if sl == 0: @@ -134,7 +137,7 @@ return self._new(str) index = space.getindex_w(w_index, space.w_IndexError, "string index") - selfvalue = self._val() + selfvalue = self._val(space) selflen = len(selfvalue) if index < 0: index += selflen @@ -145,7 +148,7 @@ return self._new(selfvalue[index]) def descr_getslice(self, space, w_start, w_stop): - selfvalue = self._val() + selfvalue = self._val(space) start, stop = normalize_simple_slice(space, len(selfvalue), w_start, w_stop) if start == stop: @@ -154,7 +157,7 @@ return self._sliced(space, selfvalue, start, stop, self) def descr_capitalize(self, space): - value = self._val() + value = self._val(space) if len(value) == 0: return self.EMPTY @@ -166,7 +169,7 @@ @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_center(self, space, width, w_fillchar): - value = self._value + value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: raise OperationError(space.w_TypeError, @@ -184,7 +187,7 @@ def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return wrapint(space, value.count(w_sub._value, start, end)) + return wrapint(space, value.count(self._op_val(space, w_sub), start, end)) def descr_decode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ @@ -202,7 +205,7 @@ @unwrap_spec(tabsize=int) def descr_expandtabs(self, space, tabsize=8): - value = self._val() + value = self._val(space) if not value: return self.EMPTY @@ -245,17 +248,17 @@ def descr_find(self, space, w_sub, w_start=None, w_end=None): 
(value, start, end) = self._convert_idx_params(space, w_start, w_end) - res = value.find(w_sub._value, start, end) + res = value.find(self._op_val(space, w_sub), start, end) return space.wrap(res) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) - res = value.rfind(w_sub._value, start, end) + res = value.rfind(self._op_val(space, w_sub), start, end) return space.wrap(res) def descr_index(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) - res = value.find(w_sub._value, start, end) + res = value.find(self._op_val(space, w_sub), start, end) if res < 0: raise OperationError(space.w_ValueError, space.wrap("substring not found in string.index")) @@ -264,7 +267,7 @@ def descr_rindex(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) - res = value.rfind(w_sub._value, start, end) + res = value.rfind(self._op_val(space, w_sub), start, end) if res < 0: raise OperationError(space.w_ValueError, space.wrap("substring not found in string.rindex")) @@ -273,7 +276,7 @@ @specialize.arg(2) def _is_generic(self, space, fun): - v = self._value + v = self._val(space) if len(v) == 0: return space.w_False if len(v) == 1: @@ -299,7 +302,7 @@ return self._is_generic(space, self._isdigit) def descr_islower(self, space): - v = self._value + v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(c.islower()) @@ -315,7 +318,7 @@ return self._is_generic(space, self._isspace) def descr_istitle(self, space): - input = self._value + input = self._val(space) cased = False previous_is_cased = False @@ -336,7 +339,7 @@ return space.newbool(cased) def descr_isupper(self, space): - v = self._value + v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(c.isupper()) @@ -353,7 +356,7 @@ #if l is not None: # if len(l) == 1: # return space.wrap(l[0]) - # return space.wrap(self._value.join(l)) + # return space.wrap(self._val(space).join(l)) list_w = space.listview(w_list) size = len(list_w) @@ -372,7 +375,7 @@ @jit.look_inside_iff(lambda self, space, list_w, size: jit.loop_unrolling_heuristic(list_w, size)) def _str_join_many_items(self, space, list_w, size): - value = self._val() + value = self._val(space) prealloc_size = len(value) * (size - 1) for i in range(size): @@ -405,7 +408,7 @@ @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_ljust(self, space, width, w_fillchar): - value = self._value + value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: raise OperationError(space.w_TypeError, @@ -420,7 +423,7 @@ @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_rjust(self, space, width, w_fillchar): - value = self._value + value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: raise OperationError(space.w_TypeError, @@ -434,14 +437,14 @@ return space.wrap(value) def descr_lower(self, space): - value = self._val() + value = self._val(space) builder = self._builder(len(value)) for i in range(len(value)): builder.append(self._lower(value[i])) return self._new(builder.build()) def descr_partition(self, space, w_sub): - value = self._val() + value = self._val(space) sub = self._op_val(space, w_sub) if not sub: raise OperationError(space.w_ValueError, @@ -455,7 +458,7 @@ self._sliced(space, value, pos+len(sub), len(value), value)]) def descr_rpartition(self, space, w_sub): - value = self._val() + 
value = self._val(space) sub = self._op_val(space, w_sub) if not sub: raise OperationError(space.w_ValueError, @@ -470,7 +473,7 @@ @unwrap_spec(count=int) def descr_replace(self, space, w_old, w_new, count=-1): - input = self._val() + input = self._val(space) sub = self._op_val(space, w_old) by = self._op_val(space, w_new) try: @@ -483,7 +486,7 @@ @unwrap_spec(maxsplit=int) def descr_split(self, space, w_sep=None, maxsplit=-1): res = [] - value = self._val() + value = self._val(space) length = len(value) if space.is_none(w_sep): i = 0 @@ -523,7 +526,7 @@ @unwrap_spec(maxsplit=int) def descr_rsplit(self, space, w_sep=None, maxsplit=-1): res = [] - value = self._val() + value = self._val(space) if space.is_none(w_sep): i = len(value)-1 while True: @@ -565,7 +568,7 @@ @unwrap_spec(keepends=bool) def descr_splitlines(self, space, keepends=False): - data = self._value + data = self._val(space) selflen = len(data) strs = [] i = j = 0 @@ -616,8 +619,8 @@ def _strip(self, space, w_chars, left, right): "internal function called by str_xstrip methods" - value = self._value - u_chars = w_chars._value + value = self._val(space) + u_chars = self._op_val(space, w_chars) lpos = 0 rpos = len(value) @@ -636,7 +639,7 @@ def _strip_none(self, space, left, right): "internal function called by str_xstrip methods" - value = self._value + value = self._val(space) lpos = 0 rpos = len(value) @@ -669,7 +672,7 @@ return self._strip(space, w_chars, left=0, right=1) def descr_swapcase(self, space): - selfvalue = self._val() + selfvalue = self._val(space) builder = self._builder(len(selfvalue)) for i in range(len(selfvalue)): ch = selfvalue[i] @@ -682,7 +685,7 @@ return space.wrap(builder.build()) def descr_title(self, space): - selfval = self._val() + selfval = self._val(space) if len(selfval) == 0: return self @@ -711,7 +714,7 @@ space.w_ValueError, space.wrap("translation table must be 256 characters long")) - string = self._val() + string = self._val(space) deletechars = self._op_val(space, w_deletechars) if len(deletechars) == 0: buf = self._builder(len(string)) @@ -728,7 +731,7 @@ return self._new(buf.build()) def descr_upper(self, space): - value = self._val() + value = self._val(space) builder = self._builder(len(value)) for i in range(len(value)): builder.append(self._upper(value[i])) @@ -736,7 +739,7 @@ @unwrap_spec(width=int) def descr_zfill(self, space, width): - selfval = self._val() + selfval = self._val(space) if len(selfval) == 0: return self._new(self._chr('0') * width) num_zeros = width - len(selfval) @@ -756,4 +759,4 @@ return space.wrap(builder.build()) def descr_getnewargs(self, space): - return space.newtuple([self._new(self._val())]) + return space.newtuple([self._new(self._val(space))]) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -75,7 +75,7 @@ def _len(self): return len(self._value) - def _val(self): + def _val(self, space): return self._value def _op_val(self, space, w_other): From noreply at buildbot.pypy.org Tue Jul 23 11:34:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 23 Jul 2013 11:34:33 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: In get_unique_llfn(), ignore the "non-simple" calls and pick the one Message-ID: <20130723093433.6BA741C155C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65543:bf22340c9ecd Date: 2013-07-23 11:33 +0200 http://bitbucket.org/pypy/pypy/changeset/bf22340c9ecd/ Log: In 
get_unique_llfn(), ignore the "non-simple" calls and pick the one call which is a simple call. diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1021,8 +1021,9 @@ def specialize_call(self, hop): from rpython.rtyper.lltypesystem import lltype + args_v = hop.inputargs(lltype.Bool, lltype.Void, *hop.args_r[2:]) + args_v[1] = hop.args_r[1].get_unique_llfn() hop.exception_is_here() - args_v = hop.inputargs(lltype.Bool, lltype.Void, *hop.args_r[2:]) return hop.genop('jit_conditional_call', args_v) class Counters(object): diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -283,9 +283,13 @@ assert len(self.s_pbc.descriptions) == 1 # lowleveltype wouldn't be Void otherwise funcdesc, = self.s_pbc.descriptions - if len(self.callfamily.calltables) != 1: + tables = [] # find the simple call in the calltable + for key, table in self.callfamily.calltables.items(): + if not key[1] and not key[2] and not key[3]: + tables.append(table) + if len(tables) != 1: raise TyperError("cannot pass a function with various call shapes") - table, = self.callfamily.calltables.values() + table, = tables graphs = [] for row in table: if funcdesc in row: From noreply at buildbot.pypy.org Tue Jul 23 12:35:13 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 12:35:13 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: hg merge default Message-ID: <20130723103513.BD6891C14B6@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65544:80e0b308dedf Date: 2013-07-23 12:09 +0200 http://bitbucket.org/pypy/pypy/changeset/80e0b308dedf/ Log: hg merge default diff too long, truncating to 2000 out of 9665 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael 
Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. 
Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. 
-License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -75,6 +76,15 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) 
target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -319,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. 
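To make the restriction concrete, here is a toy sketch (an illustrative example, not part of the patch above): the RPython annotator must be able to give every variable a single static type, so ordinary dynamic Python idioms are rejected.

    def rpython_friendly(n):
        # 'total' always holds an int, so the annotator can infer
        # a single static type for it.
        total = 0
        for i in range(n):
            total += i
        return total

    def not_rpython(n):
        # Rebinding 'x' to an int, then a string, then a list is fine
        # in full Python, but there is no single static type for 'x',
        # so the RPython toolchain refuses code like this.
        x = n
        x = str(n)
        x = [x]
        return x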
@@ -357,10 +357,13 @@ mainstream and where they will get help from many people.* *If anybody seriously wants to promote RPython anyway, he is welcome - to: we won't actively resist such a plan. This is open source, which - means that anybody is free to promote and develop anything; but it - also means that you must let us choose* not *to go into that direction - ourselves.* + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction + ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. _`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,72 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, including numerous improvements to the +numpy in PyPy effort. The main new feature is that the ARM processor support is +no longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture + +* Stacklet support on ARM + +* Interpreter improvements + +* Various numpy improvements + +* Bugfixes to cffi and ctypes + +* Bugfixes to the stacklet support + +* Improved logging performance + +* Faster sets for objects + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32.
Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,77 +2,32 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: ndarray-ptp -put and array.put +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer -.. branch: remove-array-smm -Remove multimethods in the arraymodule +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. 
Add a --without-tk switch +to optionally skip it. -.. branch: remove-set-smm -Remove multi-methods on sets +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. -.. branch: numpy-subarrays -Implement subarrays for numpy +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative -.. branch: ndarray-round - -.. branch: faster-str-of-bigint -Improve performance of str(long). - -.. branch: ndarray-view -Add view to ndarray and zeroD arrays, not on dtype scalars yet - -.. branch: numpypy-segfault -fix segfault caused by iterating over empty ndarrays - -.. branch: identity-set -Faster sets for objects - -.. branch: inline-identityhash -Inline the fast path of id() and hash() +.. branch: ssl_moving_write_buffer diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" - assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" 
+ """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + 
called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. 
+ builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,20 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +81,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,39 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,104 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize + N = 2 ** 128 + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) 
== sys.maxsize + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) + + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) == 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxsize + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 
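As an aside, the low-level `_cffi_backend` tests above have a rough high-level equivalent in cffi's documented API. A minimal sketch of the "receiving tiny struct" case, assuming a cffi version that supports passing small structs by value to callbacks (the feature these tests exercise):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("struct foo { signed char a, b; };")

    def py_cb(s):
        # 's' behaves like a 'struct foo' received by value
        return s.a + 10 * s.b

    # 'cb' is a cdata function pointer usable from C code that expects
    # an int (*)(struct foo) callback.
    cb = ffi.callback("int(struct foo)", py_cb)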
+ +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") @@ -2760,6 +2832,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. 
" + "You might need to install libncurses5-dev or similar.") eci = guess_eci() diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -0,0 +1,404 @@ +import sys +import math +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from rpython.rlib import rfloat +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode + +OVF_DIGITS = len(str(sys.maxint)) + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10.0**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. + + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. 
+ """ + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + # we put our string in a raw buffer so: + # 1) we automatically get the '\0' sentinel at the end of the string, + # which means that we never have to check for the "end of string" + # 2) we can pass the buffer directly to strtod + self.ll_chars = rffi.str2charp(s) + self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') + self.pos = 0 + self.last_type = TYPE_UNKNOWN + + def close(self): + rffi.free_charp(self.ll_chars) + lltype.free(self.end_ptr, flavor='raw') + + def getslice(self, start, end): + assert start >= 0 + assert end >= 0 + return self.s[start:end] + + def skip_whitespace(self, i): + while True: + ch = self.ll_chars[i] + if is_whitespace(ch): + i+=1 + else: + break + return i + + @specialize.arg(1) + def _raise(self, msg, *args): + raise operationerrfmt(self.space.w_ValueError, msg, *args) + + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.ll_chars[i] + if ch == '"': + return self.decode_string(i+1) + elif ch == '[': + return self.decode_array(i+1) + elif ch == '{': + return self.decode_object(i+1) + elif ch == 'n': + return self.decode_null(i+1) + elif ch == 't': + return self.decode_true(i+1) + elif ch == 'f': + return self.decode_false(i+1) + elif ch == 'I': + return self.decode_infinity(i+1) + elif ch == 'N': + return self.decode_nan(i+1) + elif ch == '-': + if self.ll_chars[i+1] == 'I': + return self.decode_infinity(i+2, sign=-1) + return self.decode_numeric(i) + elif ch.isdigit(): + return self.decode_numeric(i) + else: + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.pos) + + def decode_null(self, i): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): + self.pos = i+3 + return self.space.w_None + self._raise("Error when decoding null at char %d", i) + + def decode_true(self, i): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): + self.pos = i+3 + return self.space.w_True + self._raise("Error when decoding true at char %d", i) + + def decode_false(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): + self.pos = i+4 + return self.space.w_False + self._raise("Error when decoding false at char %d", i) + + def decode_infinity(self, i, sign=1): + if (self.ll_chars[i] == 'n' and + self.ll_chars[i+1] == 'f' and + self.ll_chars[i+2] == 'i' and + self.ll_chars[i+3] == 'n' and + self.ll_chars[i+4] == 'i' and + self.ll_chars[i+5] == 't' and + self.ll_chars[i+6] == 'y'): + self.pos = i+7 + return self.space.wrap(rfloat.INFINITY * sign) + self._raise("Error when decoding Infinity at char %d", i) + + def decode_nan(self, i): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'N'): + self.pos = i+2 + return self.space.wrap(rfloat.NAN) + self._raise("Error when decoding NaN at char %d", i) + + def decode_numeric(self, i): + start = i + i, ovf_maybe, intval = self.parse_integer(i) + # + # check for 
the optional fractional part + ch = self.ll_chars[i] + if ch == '.': + if not self.ll_chars[i+1].isdigit(): + self._raise("Expected digit at char %d", i+1) + return self.decode_float(start) + elif ch == 'e' or ch == 'E': + return self.decode_float(start) + elif ovf_maybe: + return self.decode_int_slow(start) + + self.pos = i + return self.space.wrap(intval) + + def decode_float(self, i): + from rpython.rlib import rdtoa + start = rffi.ptradd(self.ll_chars, i) + floatval = rdtoa.dg_strtod(start, self.end_ptr) From noreply at buildbot.pypy.org Tue Jul 23 12:55:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 12:55:18 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill rpython.jit.codewriter.support.OOtypeHelpers Message-ID: <20130723105518.D61AF1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65545:35bee261e492 Date: 2013-07-23 12:54 +0200 http://bitbucket.org/pypy/pypy/changeset/35bee261e492/ Log: Kill rpython.jit.codewriter.support.OOtypeHelpers diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -640,9 +640,6 @@ op.result) def _array_of_voids(self, ARRAY): - #if isinstance(ARRAY, ootype.Array): - # return ARRAY.ITEM == ootype.Void - #else: return ARRAY.OF == lltype.Void def rewrite_op_getfield(self, op): diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -15,7 +15,6 @@ from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.module import ll_math -from rpython.rtyper.ootypesystem import ootype, rdict as oo_rdict from rpython.translator.simplify import get_funcobj from rpython.translator.translator import TranslationContext from rpython.translator.unsimplify import split_block @@ -75,7 +74,7 @@ # another block, so the set of alive_v will be different. methname = op.args[0].value assert methname == 'jit_merge_point', ( - "reds='auto' is supported only for jit drivers which " + "reds='auto' is supported only for jit drivers which " "calls only jit_merge_point. 
Found a call to %s" % methname) # # compute the set of live variables across the jit_marker @@ -698,78 +697,6 @@ def _ll_1_gc_add_memory_pressure(num): llop.gc_add_memory_pressure(lltype.Void, num) -class OOtypeHelpers: - - # ---------- dict ---------- - - def _ll_0_newdict(DICT): - return oo_rdict.ll_newdict(DICT) - _ll_0_newdict.need_result_type = True - - def _ll_3_dict_setitem(d, key, value): - d.ll_set(key, value) - - def _ll_2_dict_contains(d, key): - return d.ll_contains(key) - - def _ll_1_dict_clear(d): - d.ll_clear() - - _ll_2_dict_getitem = oo_rdict.ll_dict_getitem - _ll_2_dict_delitem = oo_rdict.ll_dict_delitem - _ll_3_dict_setdefault = oo_rdict.ll_dict_setdefault - _ll_3_dict_get = oo_rdict.ll_dict_get - _ll_1_dict_copy = oo_rdict.ll_dict_copy - _ll_2_dict_update = oo_rdict.ll_dict_update - - # ---------- dict keys(), values(), items(), iter ---------- - - _ll_1_dict_keys = oo_rdict.ll_dict_keys - _ll_1_dict_values = oo_rdict.ll_dict_values - _ll_1_dict_items = oo_rdict.ll_dict_items - _ll_1_dict_keys .need_result_type = True - _ll_1_dict_values.need_result_type = True - _ll_1_dict_items .need_result_type = True - - _dictnext_keys = staticmethod(oo_rdict.ll_dictnext_group['keys']) - _dictnext_values = staticmethod(oo_rdict.ll_dictnext_group['values']) - _dictnext_items = staticmethod(oo_rdict.ll_dictnext_group['items']) - - def _ll_1_dictiter_nextkeys(iter): - return OOtypeHelpers._dictnext_keys(None, iter) - def _ll_1_dictiter_nextvalues(iter): - return OOtypeHelpers._dictnext_values(None, iter) - def _ll_1_dictiter_nextitems(RES, iter): - return OOtypeHelpers._dictnext_items(RES, iter) - _ll_1_dictiter_nextitems.need_result_type = True - - # --------------- oostring and oounicode ---------------- - - def _ll_2_oostring_signed_foldable(n, base): - return ootype.oostring(n, base) - - def _ll_1_oostring_char_foldable(ch): - return ootype.oostring(ch, -1) - - def _ll_1_oostring_unsigned_foldable(n): - return ootype.oostring(n, -1) - - def _ll_1_oostring_string_foldable(s): - return ootype.oostring(s, -1) - - def _ll_1_oostring_root_foldable(s): - return ootype.oostring(s, -1) - - def _ll_2_oounicode_signed_foldable(n, base): - return ootype.oounicode(n, base) - - def _ll_1_oounicode_unichar_foldable(ch): - return ootype.oounicode(ch, -1) - - def _ll_1_oounicode_string_foldable(s): - return ootype.oounicode(s, -1) - -# ------------------------------------------------------- def setup_extra_builtin(rtyper, oopspec_name, nb_args, extra=None): name = '_ll_%d_%s' % (nb_args, oopspec_name.replace('.', '_')) @@ -778,11 +705,7 @@ try: wrapper = globals()[name] except KeyError: - if rtyper.type_system.name == 'lltypesystem': - Helpers = LLtypeHelpers - else: - Helpers = OOtypeHelpers - wrapper = getattr(Helpers, name).im_func + wrapper = getattr(LLtypeHelpers, name).im_func if extra is not None: wrapper = wrapper(*extra) return wrapper @@ -823,16 +746,6 @@ normalized_opargs = normalize_opargs(argtuple, opargs) return oopspec, normalized_opargs -def get_oostring_oopspec(op): - T = op.args[0].concretetype - if T is not ootype.Signed: - args = op.args[:-1] - else: - args = op.args - if isinstance(T, ootype.Instance): - T = ootype.ROOT - return '%s_%s_foldable' % (op.opname, T._name.lower()), args - def get_identityhash_oopspec(op): return 'gc_identityhash', op.args @@ -858,15 +771,10 @@ def decode_builtin_call(op): - if op.opname == 'oosend': - SELFTYPE, name, opargs = decompose_oosend(op) - return get_send_oopspec(SELFTYPE, name), opargs - elif op.opname == 'direct_call': + if op.opname == 
'direct_call': fnobj = get_funcobj(op.args[0].value) opargs = op.args[1:] return get_call_oopspec_opargs(fnobj, opargs) - elif op.opname in ('oostring', 'oounicode'): - return get_oostring_oopspec(op) elif op.opname == 'gc_identityhash': return get_identityhash_oopspec(op) elif op.opname == 'gc_id': From noreply at buildbot.pypy.org Tue Jul 23 13:11:09 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 23 Jul 2013 13:11:09 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix a few tests Message-ID: <20130723111109.96AFA1C02A1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65546:6cfeb749f64c Date: 2013-07-23 13:10 +0200 http://bitbucket.org/pypy/pypy/changeset/6cfeb749f64c/ Log: fix a few tests diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -112,10 +112,12 @@ descr = op.getdescr() if not we_are_translated() and descr is None: return - llref = cast_instance_to_gcref(descr) + llref = rgc.cast_instance_to_gcref(descr) new_llref = rgc._make_sure_does_not_move(llref) new_d = rgc.try_cast_gcref_to_instance(AbstractDescr, new_llref) - op.setdescr(new_d) + if we_are_translated(): + # tests don't allow this + op.setdescr(new_d) gcrefs_output_list.append(new_llref) def rewrite_assembler(self, cpu, operations, gcrefs_output_list): diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -344,7 +344,7 @@ self.check_rewrite(""" [i1, i2] i3 = getfield_raw(i1, descr=tydescr) - keepalive(i3) # random ignored operation + keepalive(i3) i4 = getfield_raw(i2, descr=tydescr) jump(i3, i4) """, """ diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -529,6 +529,7 @@ # self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) + operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) if logger: @@ -1021,6 +1022,8 @@ def genop_ptr_eq(self, op, arglocs, result_loc): + if not self.cpu.gc_ll_descr.stm: + self.genop_int_eq(op, arglocs, result_loc) assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) @@ -1029,6 +1032,8 @@ self.mc.MOVZX8_rr(result_loc.value, rl.value) def genop_ptr_ne(self, op, arglocs, result_loc): + if not self.cpu.gc_ll_descr.stm: + self.genop_int_ne(op, arglocs, result_loc) assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) @@ -1038,6 +1043,10 @@ def genop_guard_ptr_eq(self, op, guard_op, guard_token, arglocs, result_loc): + if not self.cpu.gc_ll_descr.stm: + self.genop_guard_int_eq(op, guard_op, guard_token, + arglocs, result_loc) + assert not self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) self.mc.TEST_rr(eax.value, eax.value) @@ -1048,6 +1057,10 @@ def genop_guard_ptr_ne(self, op, guard_op, guard_token, arglocs, result_loc): + if not self.cpu.gc_ll_descr.stm: + self.genop_guard_int_ne(op, guard_op, guard_token, + arglocs, result_loc) + assert not self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) self.mc.TEST_rr(eax.value, 
eax.value) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -30,7 +30,7 @@ """ Returns a non-moving reference to an object (only use if obj is already OLD!) """ - return lltype.nullptr(llmemory.GCREF) + return obj # ____________________________________________________________ # Annotation and specialization @@ -119,7 +119,7 @@ on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - return + return p i = 0 while can_move(p): if i > 6: From noreply at buildbot.pypy.org Tue Jul 23 13:30:35 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 13:30:35 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: clean up test_optimizebasic.py Message-ID: <20130723113035.45F451C13EE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65547:b3f4a6721e57 Date: 2013-07-23 13:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b3f4a6721e57/ Log: clean up test_optimizebasic.py diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -95,7 +95,6 @@ elif isinstance(arg, BoxInt): return 'i' + str(mv) elif isinstance(arg, self.ts.ConstRef): - # XXX for ootype, this should also go through get_name_from_address return 'ConstPtr(ptr' + str(mv) + ')' elif isinstance(arg, self.ts.BoxRef): return 'p' + str(mv) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -3,13 +3,12 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) from rpython.jit.metainterp.history import TargetToken, JitCellToken -from rpython.jit.metainterp.test.test_compile import FakeLogger import rpython.jit.metainterp.optimizeopt.optimizer as optimizeopt import rpython.jit.metainterp.optimizeopt.virtualize as virtualize from rpython.jit.metainterp.optimize import InvalidLoop -from rpython.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt, get_const_ptr_for_string -from rpython.jit.metainterp import executor, compile, resume, history -from rpython.jit.metainterp.resoperation import rop, opname, ResOperation +from rpython.jit.metainterp.history import ConstInt, BoxInt, get_const_ptr_for_string +from rpython.jit.metainterp import executor, compile, resume +from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.rarithmetic import LONG_BIT def test_store_final_boxes_in_guard(): @@ -5105,32 +5104,3 @@ class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass - -##class TestOOtype(BaseTestOptimizeBasic, OOtypeMixin): - -## def test_instanceof(self): -## ops = """ -## [i0] -## p0 = new_with_vtable(ConstClass(node_vtable)) -## i1 = instanceof(p0, descr=nodesize) -## jump(i1) -## """ -## expected = """ -## [i0] -## jump(1) -## """ -## self.optimize_loop(ops, expected) - -## def test_instanceof_guard_class(self): -## ops = """ -## [i0, p0] -## guard_class(p0, ConstClass(node_vtable)) [] -## i1 = instanceof(p0, descr=nodesize) -## jump(i1, p0) -## """ -## expected = """ -## [i0, p0] -## guard_class(p0, ConstClass(node_vtable)) [] -## jump(1, p0) -## """ -## self.optimize_loop(ops, expected) From noreply at buildbot.pypy.org Tue Jul 23 
13:30:36 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 13:30:36 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: clean up rpython/jit/metainterp/ Message-ID: <20130723113036.70F531C13EE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65548:0cfea4acf443 Date: 2013-07-23 13:29 +0200 http://bitbucket.org/pypy/pypy/changeset/0cfea4acf443/ Log: clean up rpython/jit/metainterp/ diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py --- a/rpython/jit/metainterp/quasiimmut.py +++ b/rpython/jit/metainterp/quasiimmut.py @@ -7,8 +7,6 @@ def get_mutate_field_name(fieldname): if fieldname.startswith('inst_'): # lltype return 'mutate_' + fieldname[5:] - elif fieldname.startswith('o'): # ootype - return 'mutate_' + fieldname[1:] else: raise AssertionError(fieldname) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -469,10 +469,6 @@ 'UNICODELEN/1', 'UNICODEGETITEM/2', # - # ootype operations - #'INSTANCEOF/1db', - #'SUBCLASSOF/2b', - # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- 'GETARRAYITEM_GC/2d', @@ -503,7 +499,6 @@ 'SETFIELD_RAW/2d', 'STRSETITEM/3', 'UNICODESETITEM/3', - #'RUNTIMENEW/1', # ootype operation 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.) 'DEBUG_MERGE_POINT/*', # debugging only @@ -522,8 +517,6 @@ 'CALL_MAY_FORCE/*d', 'CALL_LOOPINVARIANT/*d', 'CALL_RELEASE_GIL/*d', # release the GIL and "close the stack" for asmgcc - #'OOSEND', # ootype operation - #'OOSEND_PURE', # ootype operation 'CALL_PURE/*d', # removed before it's passed to the backend 'CALL_MALLOC_GC/*d', # like CALL, but NULL => propagate MemoryError 'CALL_MALLOC_NURSERY/1', # nursery malloc, const number of bytes, zeroed diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -1,7 +1,5 @@ import sys -import py - from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import (llhelper, MixLevelHelperAnnotator, @@ -977,8 +975,6 @@ op.args[:3] = [closures[key]] def rewrite_force_virtual(self, vrefinfo): - if self.cpu.ts.name != 'lltype': - py.test.skip("rewrite_force_virtual: port it to ootype") all_graphs = self.translator.graphs vrefinfo.replace_force_virtual_with_call(all_graphs) From noreply at buildbot.pypy.org Tue Jul 23 13:31:38 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 23 Jul 2013 13:31:38 +0200 (CEST) Subject: [pypy-commit] pypy default: start writing the announcement for the 2.1 beta2 Message-ID: <20130723113138.9FA411C13EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65549:2048a6b6e296 Date: 2013-07-23 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/2048a6b6e296/ Log: start writing the announcement for the 2.1 beta2 diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,60 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta does not add any new features to the 2.1 release but contains several bugfixes. 
+ +Highlights +========== + +* On packaging compile the CFFI tk extension. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. From noreply at buildbot.pypy.org Tue Jul 23 13:31:39 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 23 Jul 2013 13:31:39 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: bump version numbers Message-ID: <20130723113139.CD3FA1C13EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65550:f5220e6c0aa1 Date: 2013-07-23 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/f5220e6c0aa1/ Log: bump version numbers diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-beta1" +#define PYPY_VERSION "2.1.0-beta2" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "beta", 1) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 1, 0, "beta", 2) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Tue Jul 23 13:31:41 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 23 Jul 2013 13:31:41 +0200 (CEST) Subject: [pypy-commit] pypy default: reorder Message-ID: <20130723113141.041521C13EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65551:ef8730c2c417 Date: 2013-07-23 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ef8730c2c417/ Log: reorder diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -8,8 +8,6 @@ Highlights ========== -* On packaging compile the CFFI tk extension. - * Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). * Fixed issue `1552`_: GreenletExit should inherit from BaseException @@ -22,6 +20,8 @@ split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS. +* On packaging, compile the CFFI tk extension. + .. _`1533`: https://bugs.pypy.org/issue1533 .. _`1552`: https://bugs.pypy.org/issue1552 .. _`1537`: https://bugs.pypy.org/issue1537 From noreply at buildbot.pypy.org Tue Jul 23 13:31:42 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 23 Jul 2013 13:31:42 +0200 (CEST) Subject: [pypy-commit] pypy default: wording Message-ID: <20130723113142.353E11C13EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65552:5d62d544b47e Date: 2013-07-23 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/5d62d544b47e/ Log: wording diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -3,7 +3,7 @@ =============== We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. -This beta does not add any new features to the 2.1 release but contains several bugfixes. +This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. Highlights ========== @@ -20,7 +20,7 @@ split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS. -* On packaging, compile the CFFI tk extension. +* During packaging, compile the CFFI tk extension. .. _`1533`: https://bugs.pypy.org/issue1533 .. 
_`1552`: https://bugs.pypy.org/issue1552 From noreply at buildbot.pypy.org Tue Jul 23 13:35:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Tue, 23 Jul 2013 13:35:54 +0200 (CEST) Subject: [pypy-commit] pypy default: update docs Message-ID: <20130723113554.B733B1C14B6@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65553:f38b470ae432 Date: 2013-07-23 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/f38b470ae432/ Log: update docs diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads From noreply at buildbot.pypy.org Tue Jul 23 14:11:30 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 14:11:30 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill get_funcobj() because it is now trivial Message-ID: <20130723121130.7CA2B1C02A1@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65554:4cd0c0e9db23 Date: 2013-07-23 14:10 +0200 http://bitbucket.org/pypy/pypy/changeset/4cd0c0e9db23/ Log: Kill get_funcobj() because it is now trivial diff --git a/rpython/jit/backend/llsupport/support.py b/rpython/jit/backend/llsupport/support.py --- a/rpython/jit/backend/llsupport/support.py +++ b/rpython/jit/backend/llsupport/support.py @@ -1,4 +1,3 @@ -from rpython.translator.simplify import get_funcobj from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.extregistry import ExtRegistryEntry @@ -28,7 +27,7 @@ i += 1 return real_args - funcobj = get_funcobj(fnptr) + funcobj = fnptr._obj if hasattr(funcobj, 'graph'): # cache the llinterp; otherwise the remember_malloc/remember_free # done on the LLInterpreter don't match diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -8,7 +8,7 @@ from rpython.jit.codewriter.effectinfo import (VirtualizableAnalyzer, QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) -from rpython.translator.simplify import get_funcobj, get_functype +from rpython.translator.simplify import get_functype from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer @@ -86,7 +86,7 @@ if is_candidate is None: is_candidate = self.is_candidate if op.opname == 'direct_call': - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj graph = funcobj.graph if is_candidate(graph): return [graph] # common case: look inside this graph @@ -130,7 +130,7 @@ funcptr = op.args[0].value if self.jitdriver_sd_from_portal_runner_ptr(funcptr) is not None: return 'recursive' - funcobj = get_funcobj(funcptr) + funcobj = funcptr._obj if getattr(funcobj, 'graph', None) is None: return 'residual' targetgraph = funcobj.graph @@ -222,7 +222,7 @@ loopinvariant = False call_release_gil_target = llmemory.NULL if op.opname == "direct_call": - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj assert getattr(funcobj, 'calling_conv', 'c') == 'c', ( "%r: 
getcalldescr() with a non-default call ABI" % (op,)) func = getattr(funcobj, '_callable', None) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -14,7 +14,6 @@ from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY -from rpython.translator.simplify import get_funcobj from rpython.translator.unsimplify import varoftype class UnsupportedMallocFlags(Exception): @@ -1444,7 +1443,7 @@ # because functions that are neither nonneg nor fast don't have an # oopspec any more # xxx break of abstraction: - func = get_funcobj(op.args[0].value)._callable + func = op.args[0].value._obj._callable # base hints on the name of the ll function, which is a bit xxx-ish # but which is safe for now assert func.func_name.startswith('ll_') diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -15,7 +15,6 @@ from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory, rstr as ll_rstr, rdict as ll_rdict from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.lltypesystem.module import ll_math -from rpython.translator.simplify import get_funcobj from rpython.translator.translator import TranslationContext from rpython.translator.unsimplify import split_block @@ -149,7 +148,7 @@ def maybe_on_top_of_llinterp(rtyper, fnptr): # Run a generated graph on top of the llinterp for testing. # When translated, this just returns the fnptr. - funcobj = get_funcobj(fnptr) + funcobj = fnptr._obj if hasattr(funcobj, 'graph'): llinterp = LLInterpreter(rtyper) #, exc_data_ptr=exc_data_ptr) def on_top_of_llinterp(*args): @@ -772,7 +771,7 @@ def decode_builtin_call(op): if op.opname == 'direct_call': - fnobj = get_funcobj(op.args[0].value) + fnobj = op.args[0].value._obj opargs = op.args[1:] return get_call_oopspec_opargs(fnobj, opargs) elif op.opname == 'gc_identityhash': diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -247,7 +247,7 @@ test_ajit::test_inline_jit_merge_point """ from rpython.translator.backendopt.inline import ( - get_funcobj, inlinable_static_callers, auto_inlining) + inlinable_static_callers, auto_inlining) jmp_calls = {} def get_jmp_call(graph, _inline_jit_merge_point_): @@ -277,7 +277,7 @@ msg = ("The first operation of an _inline_jit_merge_point_ graph must be " "a direct_call to the function passed to @jitdriver.inline()") assert op_jmp_call.opname == 'direct_call', msg - jmp_funcobj = get_funcobj(op_jmp_call.args[0].value) + jmp_funcobj = op_jmp_call.args[0].value._obj assert jmp_funcobj._callable is _inline_jit_merge_point_, msg jmp_block.operations.remove(op_jmp_call) return op_jmp_call, jmp_funcobj.graph diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1382,7 +1382,6 @@ assert res == True def test_immutable_list_out_of_instance(self): - from rpython.translator.simplify import get_funcobj for immutable_fields in (["a", "b"], ["a", "b", "y[*]"]): class A(object): _immutable_fields_ = immutable_fields @@ -1401,7 +1400,7 @@ block = graph.startblock op = block.operations[-1] 
assert op.opname == 'direct_call' - func = get_funcobj(op.args[0].value)._callable + func = op.args[0].value._obj._callable assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -3,7 +3,6 @@ from rpython.rtyper.lltypesystem.lloperation import LL_OPERATIONS from rpython.tool.ansi_print import ansi_log from rpython.translator.backendopt import graphanalyze -from rpython.translator.simplify import get_funcobj log = py.log.Producer("canraise") py.log.setconsumer("canraise", ansi_log) @@ -18,7 +17,7 @@ return True def analyze_external_call(self, op, seen=None): - fnobj = get_funcobj(op.args[0].value) + fnobj = op.args[0].value._obj return getattr(fnobj, 'canraise', True) def analyze_external_method(self, op, TYPE, meth): diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,4 +1,4 @@ -from rpython.translator.simplify import get_graph, get_funcobj +from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -52,7 +52,7 @@ return self.bottom_result() def analyze_external_call(self, op, seen=None): - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -8,7 +8,7 @@ from rpython.translator.backendopt import removenoops from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.support import log, find_loop_blocks -from rpython.translator.simplify import join_blocks, cleanup_graph, get_graph, get_funcobj +from rpython.translator.simplify import join_blocks, cleanup_graph, get_graph from rpython.translator.unsimplify import copyvar, split_block @@ -63,7 +63,7 @@ for block in graph.iterblocks(): for i, op in enumerate(block.operations): if op.opname == "direct_call": - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj elif op.opname == "oosend": funcobj = get_meth_from_oosend(op) if funcobj is None: @@ -218,7 +218,7 @@ def get_graph_from_op(self, op): assert op.opname in ('direct_call', 'oosend') if op.opname == 'direct_call': - return get_funcobj(self.op.args[0].value).graph + return self.op.args[0].value._obj.graph else: return get_meth_from_oosend(op).graph @@ -246,7 +246,7 @@ d = {} for i, op in enumerate(block.operations): if op.opname == "direct_call": - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj elif op.opname == "oosend": funcobj = get_meth_from_oosend(op) if funcobj is None: @@ -622,7 +622,7 @@ for block in parentgraph.iterblocks(): for op in block.operations: if op.opname == "direct_call": - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj graph = getattr(funcobj, 'graph', None) if graph is not None and graph in ok_to_call: if getattr(getattr(funcobj, '_callable', None), @@ -654,7 +654,7 @@ op = ops[i] i -= 1 if op.opname == "direct_call": - funcobj = get_funcobj(op.args[0].value) + funcobj = op.args[0].value._obj graph = getattr(funcobj, 'graph', 
None) if graph is not None: if getattr(getattr(funcobj, '_callable', None), diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -1,6 +1,4 @@ -import py from rpython.translator.translator import TranslationContext, graphof -from rpython.translator.simplify import get_funcobj from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.all import backend_optimizations from rpython.conftest import option @@ -144,7 +142,7 @@ # check that we fished the expected ops def check_call(op, fname): assert op.opname == "direct_call" - assert get_funcobj(op.args[0].value)._name == fname + assert op.args[0].value._obj._name == fname check_call(op_call_f, "f") check_call(op_call_m, "m") diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -1,7 +1,5 @@ -import py from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext, graphof -from rpython.translator.simplify import get_funcobj from rpython.translator.backendopt.writeanalyze import WriteAnalyzer, top_set from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer from rpython.translator.backendopt.all import backend_optimizations @@ -159,7 +157,7 @@ # check that we fished the expected ops def check_call(op, fname): assert op.opname == "direct_call" - assert get_funcobj(op.args[0].value)._name == fname + assert op.args[0].value._obj._name == fname check_call(op_call_f, "f") check_call(op_call_m, "m") @@ -193,7 +191,6 @@ def test_llexternal(self): from rpython.rtyper.lltypesystem.rffi import llexternal - from rpython.rtyper.lltypesystem import lltype z = llexternal('z', [lltype.Signed], lltype.Signed) def f(x): return z(x) @@ -307,7 +304,7 @@ # check that we fished the expected ops assert op_call_f.opname == "direct_call" - assert get_funcobj(op_call_f.args[0].value)._name == 'A.f' + assert op_call_f.args[0].value._obj._name == 'A.f' result = wa.analyze(op_call_f) assert len(result) == 2 diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -4,7 +4,6 @@ simplify_graph() applies all simplifications defined in this file. 
""" - import py from rpython.flowspace import operation @@ -16,16 +15,6 @@ from rpython.rtyper.lltypesystem import lloperation, lltype from rpython.rtyper.ootypesystem import ootype -def get_funcobj(func): - """ - Return an object which is supposed to have attributes such as graph and - _callable - """ - if hasattr(func, '_obj'): - return func._obj # lltypesystem - else: - return func # ootypesystem - def get_functype(TYPE): if isinstance(TYPE, lltype.Ptr): return TYPE.TO @@ -39,7 +28,7 @@ f = arg.value if not isinstance(f, lltype._ptr) and not isinstance(f, ootype._callable): return None - funcobj = get_funcobj(f) + funcobj = f._obj try: callable = funcobj._callable except (AttributeError, KeyError, AssertionError): From noreply at buildbot.pypy.org Tue Jul 23 14:11:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 23 Jul 2013 14:11:57 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Import cffi/b3729934bf48 Message-ID: <20130723121157.0871C1C02A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65555:014395ed8ecd Date: 2013-07-18 22:14 +0200 http://bitbucket.org/pypy/pypy/changeset/014395ed8ecd/ Log: Import cffi/b3729934bf48 (transplanted from d2b06cce2209db4191e50153da4df9c02b5cdd81) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + 
('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") From noreply at buildbot.pypy.org Tue Jul 23 14:47:46 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 14:47:46 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill get_functype() since it is now trivial Message-ID: <20130723124746.8DD231C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65556:11b518ec7c25 Date: 2013-07-23 14:47 +0200 http://bitbucket.org/pypy/pypy/changeset/11b518ec7c25/ Log: Kill get_functype() since it is now trivial diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -8,7 +8,6 @@ from rpython.jit.codewriter.effectinfo import (VirtualizableAnalyzer, QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) -from rpython.translator.simplify import get_functype from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer @@ -191,7 +190,7 @@ interp to really do the call corresponding to 'inline_call' ops. """ fnptr = self.rtyper.type_system.getcallable(graph) - FUNC = get_functype(lltype.typeOf(fnptr)) + FUNC = lltype.typeOf(fnptr).TO assert self.rtyper.type_system.name == "lltypesystem" fnaddr = llmemory.cast_ptr_to_adr(fnptr) NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void] @@ -212,7 +211,7 @@ if x.concretetype is not lltype.Void] RESULT = op.result.concretetype # check the number and type of arguments - FUNC = get_functype(op.args[0].concretetype) + FUNC = op.args[0].concretetype.TO ARGS = FUNC.ARGS assert NON_VOID_ARGS == [T for T in ARGS if T is not lltype.Void] assert RESULT == FUNC.RESULT diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -13,7 +13,6 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.debug import fatalerror from rpython.rlib.rstackovf import StackOverflow -from rpython.translator.simplify import get_functype from rpython.translator.backendopt import removenoops from rpython.translator.unsimplify import call_final_function @@ -661,7 +660,7 @@ def helper_func(self, FUNCPTR, func): if not self.cpu.translate_support_code: return llhelper(FUNCPTR, func) - FUNC = get_functype(FUNCPTR) + FUNC = FUNCPTR.TO args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] s_result = annmodel.lltype_to_annotation(FUNC.RESULT) graph = self.annhelper.getgraph(func, args_s, s_result) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -13,7 +13,6 @@ from rpython.rtyper import extregistry from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.rmodel import warning -from rpython.translator.simplify import get_functype class KeyComp(object): @@ -283,8 +282,8 @@ for p, graph in self.delayedfuncs: self.newgraphs[graph] = True real_p = 
rtyper.getcallable(graph) - REAL = get_functype(lltype.typeOf(real_p)) - FUNCTYPE = get_functype(lltype.typeOf(p)) + REAL = lltype.typeOf(real_p).TO + FUNCTYPE = lltype.typeOf(p).TO if isinstance(FUNCTYPE, (lltype.ForwardReference, ootype.ForwardReference)): FUNCTYPE.become(REAL) assert FUNCTYPE == REAL @@ -334,7 +333,7 @@ p = self.instance.llfnptr TYPE = lltype.typeOf(p) c_func = Constant(p, TYPE) - FUNCTYPE = get_functype(TYPE) + FUNCTYPE = TYPE.TO for r_arg, ARGTYPE in zip(args_r, FUNCTYPE.ARGS): assert r_arg.lowleveltype == ARGTYPE assert r_res.lowleveltype == FUNCTYPE.RESULT diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -13,20 +13,12 @@ from rpython.translator import unsimplify from rpython.translator.backendopt import ssa from rpython.rtyper.lltypesystem import lloperation, lltype -from rpython.rtyper.ootypesystem import ootype - -def get_functype(TYPE): - if isinstance(TYPE, lltype.Ptr): - return TYPE.TO - elif isinstance(TYPE, (ootype.StaticMethod, ootype.ForwardReference)): - return TYPE - assert False def get_graph(arg, translator): if isinstance(arg, Variable): return None f = arg.value - if not isinstance(f, lltype._ptr) and not isinstance(f, ootype._callable): + if not isinstance(f, lltype._ptr): return None funcobj = f._obj try: From noreply at buildbot.pypy.org Tue Jul 23 16:16:34 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 16:16:34 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: rm now useless test Message-ID: <20130723141634.4D1C51C02A1@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65557:aea6a7833798 Date: 2013-07-23 16:05 +0200 http://bitbucket.org/pypy/pypy/changeset/aea6a7833798/ Log: rm now useless test diff --git a/rpython/translator/test/test_interactive.py b/rpython/translator/test/test_interactive.py --- a/rpython/translator/test/test_interactive.py +++ b/rpython/translator/test/test_interactive.py @@ -78,18 +78,3 @@ dll = ctypes.CDLL(str(t.driver.c_entryp)) f = dll.pypy_g_f assert f(2, 3) == 5 - -def test_simple_rtype_with_type_system(): - - def f(x,y): - return x+y - - t = Translation(f, [int, int]) - t.rtype(type_system='lltype') - - assert 'rtype_lltype' in t.driver.done - - t = Translation(f, [int, int], backend='cli') - t.annotate() - py.test.raises(Exception, "t.rtype(type_system='lltype')") - From noreply at buildbot.pypy.org Tue Jul 23 16:16:35 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 16:16:35 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill OOTypeExceptionTransformer Message-ID: <20130723141635.7E1001C02A1@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65558:ee897d8a69ab Date: 2013-07-23 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/ee897d8a69ab/ Log: Kill OOTypeExceptionTransformer diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -5,7 +5,6 @@ from rpython.flowspace.model import Block, Constant, Variable, Link, \ c_last_exception, SpaceOperation, FunctionGraph, mkentrymap from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.lltypesystem import lloperation from rpython.rtyper import rtyper from rpython.rtyper.rmodel import inputconst @@ -37,8 +36,6 @@ return PrimitiveErrorValue[T] elif 
isinstance(T, lltype.Ptr): return lltype.nullptr(T.TO) - elif isinstance(T, ootype.OOType): - return ootype.null(T) assert 0, "not implemented yet" def error_constant(T): @@ -47,7 +44,7 @@ def constant_value(llvalue): return Constant(llvalue, lltype.typeOf(llvalue)) -class BaseExceptionTransformer(object): +class ExceptionTransformer(object): def __init__(self, translator): self.translator = translator @@ -168,7 +165,7 @@ def build_func(self, name, fn, inputtypes, rettype, **kwds): l2a = annmodel.lltype_to_annotation graph = self.mixlevelannotator.getgraph(fn, map(l2a, inputtypes), l2a(rettype)) - return self.constant_func(name, inputtypes, rettype, graph, + return self.constant_func(name, inputtypes, rettype, graph, exception_policy="exc_helper", **kwds) def get_builtin_exception(self, Class): @@ -249,7 +246,7 @@ if block.exitswitch == c_last_exception: need_exc_matching = True last_operation -= 1 - elif (len(block.exits) == 1 and + elif (len(block.exits) == 1 and block.exits[0].target is graph.returnblock and len(block.operations) and (block.exits[0].args[0].concretetype is lltype.Void or @@ -369,7 +366,7 @@ opargs.append(var) newop = SpaceOperation(op.opname, opargs, result) startblock = Block(inputargs) - startblock.operations.append(newop) + startblock.operations.append(newop) newgraph = FunctionGraph("dummy_exc1", startblock) startblock.closeblock(Link([result], newgraph.returnblock)) newgraph.returnblock.inputargs[0].concretetype = op.result.concretetype @@ -393,7 +390,7 @@ startblock.exits[True].target = excblock startblock.exits[True].args = [] fptr = self.constant_func("dummy_exc1", ARGTYPES, op.result.concretetype, newgraph) - return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result) + return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result) def gen_exc_check(self, block, returnblock, normalafterblock=None): #var_exc_occured = Variable() @@ -463,9 +460,6 @@ [v_result_after], varoftype(lltype.Void))) - -class LLTypeExceptionTransformer(BaseExceptionTransformer): - def setup_excdata(self): EXCDATA = lltype.Struct('ExcData', ('exc_type', self.lltype_of_exception_type), @@ -538,61 +532,3 @@ "RPyGetExcValueAddr", rpyexc_get_exc_value_addr, [], llmemory.Address) - - -class OOTypeExceptionTransformer(BaseExceptionTransformer): - - def setup_excdata(self): - EXCDATA = ootype.Record({'exc_type': self.lltype_of_exception_type, - 'exc_value': self.lltype_of_exception_value}) - self.EXCDATA = EXCDATA - - exc_data = ootype.new(EXCDATA) - null_type = ootype.null(self.lltype_of_exception_type) - null_value = ootype.null(self.lltype_of_exception_value) - - self.exc_data_ptr = exc_data - self.cexcdata = Constant(exc_data, self.EXCDATA) - - self.c_null_etype = Constant(null_type, self.lltype_of_exception_type) - self.c_null_evalue = Constant(null_value, self.lltype_of_exception_value) - - return exc_data, null_type, null_value - - def constant_func(self, name, inputtypes, rettype, graph, **kwds): - FUNC_TYPE = ootype.StaticMethod(inputtypes, rettype) - fn_ptr = ootype.static_meth(FUNC_TYPE, name, graph=graph, **kwds) - return Constant(fn_ptr, FUNC_TYPE) - - def gen_getfield(self, name, llops): - c_name = inputconst(lltype.Void, name) - return llops.genop('oogetfield', [self.cexcdata, c_name], - resulttype = self.EXCDATA._field_type(name)) - - def gen_setfield(self, name, v_value, llops): - c_name = inputconst(lltype.Void, name) - llops.genop('oosetfield', [self.cexcdata, c_name, v_value]) - - def gen_isnull(self, v, llops): - nonnull = self.gen_nonnull(v, 
llops) - return llops.genop('bool_not', [nonnull], lltype.Bool) - - def gen_nonnull(self, v, llops): - return llops.genop('oononnull', [v], lltype.Bool) - - def same_obj(self, obj1, obj2): - return obj1 is obj2 - - def check_for_alloc_shortcut(self, spaceop): - return False - - def build_extra_funcs(self): - pass - -def ExceptionTransformer(translator): - type_system = translator.rtyper.type_system.name - if type_system == 'lltypesystem': - return LLTypeExceptionTransformer(translator) - else: - assert type_system == 'ootypesystem' - return OOTypeExceptionTransformer(translator) From noreply at buildbot.pypy.org Tue Jul 23 18:05:27 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 18:05:27 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: clean up Message-ID: <20130723160527.5AE251C13EE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65559:25666cf63aa0 Date: 2013-07-23 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/25666cf63aa0/ Log: clean up diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -402,7 +402,6 @@ spaceop = block.operations[-1] alloc_shortcut = self.check_for_alloc_shortcut(spaceop) - # XXX: does alloc_shortcut make sense also for ootype? if alloc_shortcut: var_no_exc = self.gen_nonnull(spaceop.result, llops) else: diff --git a/rpython/translator/test/test_exceptiontransform.py b/rpython/translator/test/test_exceptiontransform.py --- a/rpython/translator/test/test_exceptiontransform.py +++ b/rpython/translator/test/test_exceptiontransform.py @@ -65,8 +65,6 @@ assert f() == 1 def test_passthrough(self): - if self.type_system == 'ootype': - py.test.skip("XXX") def one(x): if x: raise ValueError() @@ -145,8 +143,6 @@ assert result == 2 def test_raises(self): - if self.type_system == 'ootype': - py.test.skip("XXX") def foo(x): if x: raise ValueError() From noreply at buildbot.pypy.org Tue Jul 23 18:05:28 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 18:05:28 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: remove rpython.translator.backendopt.checkvirtual Message-ID: <20130723160528.8F5881C13EE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65560:4576b880cad6 Date: 2013-07-23 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/4576b880cad6/ Log: remove rpython.translator.backendopt.checkvirtual diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -9,7 +9,6 @@ from rpython.translator.backendopt import mallocprediction from rpython.translator.backendopt.removeassert import remove_asserts from rpython.translator.backendopt.support import log -from rpython.translator.backendopt.checkvirtual import check_virtual_methods from rpython.translator.backendopt.storesink import storesink_graph from rpython.flowspace.model import checkgraph @@ -53,9 +52,6 @@ if config.raisingop2direct_call: raisingop2direct_call(translator, graphs) - if translator.rtyper.type_system.name == 'ootypesystem': - check_virtual_methods() - if config.remove_asserts: constfold(config, graphs) remove_asserts(translator, graphs) @@ -93,7 +89,7 @@ if config.clever_malloc_removal: threshold = config.clever_malloc_removal_threshold - heuristic = get_function(config.clever_malloc_removal_heuristic) + heuristic = 
get_function(config.clever_malloc_removal_heuristic) log.inlineandremove("phase with threshold factor: %s" % threshold) log.inlineandremove("heuristic: %s.%s" % (heuristic.__module__, heuristic.__name__)) @@ -105,7 +101,7 @@ constfold(config, graphs) if config.print_statistics: print "after clever inlining and malloc removal" - print_statistics(translator.graphs[0], translator) + print_statistics(translator.graphs[0], translator) if config.storesink: for graph in graphs: @@ -145,7 +141,7 @@ def constfold(config, graphs): if config.constfold: for graph in graphs: - constant_fold_graph(graph) + constant_fold_graph(graph) def inline_malloc_removal_phase(config, translator, graphs, inline_threshold, inline_heuristic, @@ -175,4 +171,4 @@ if config.print_statistics: print "after malloc removal:" - print_statistics(translator.graphs[0], translator) + print_statistics(translator.graphs[0], translator) diff --git a/rpython/translator/backendopt/checkvirtual.py b/rpython/translator/backendopt/checkvirtual.py deleted file mode 100644 --- a/rpython/translator/backendopt/checkvirtual.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Visit all known INSTANCEs to see which methods can be marked as -non-virtual: a method is marked as non-virtual when it's never -overridden in the subclasses: this means that backends can translate -oosends relative to that method into non-virtual call (or maybe -switching back to a direct_call if the backend doesn't support -non-virtual calls, such as JVM). -""" - -from rpython.rtyper.ootypesystem import ootype - -def check_virtual_methods(INSTANCE=ootype.ROOT, super_methods = {}): - my_methods = super_methods.copy() - for name, method in INSTANCE._methods.iteritems(): - method._virtual = False - my_methods[name] = method - if name in super_methods: - super_methods[name]._virtual = True - - for SUB_INSTANCE in INSTANCE._subclasses: - check_virtual_methods(SUB_INSTANCE, my_methods) diff --git a/rpython/translator/backendopt/test/test_checkvirtual.py b/rpython/translator/backendopt/test/test_checkvirtual.py deleted file mode 100644 --- a/rpython/translator/backendopt/test/test_checkvirtual.py +++ /dev/null @@ -1,57 +0,0 @@ -from rpython.rtyper.ootypesystem.ootype import ROOT, Instance, \ - addMethods, meth, Meth, Void -from rpython.translator.backendopt.checkvirtual import check_virtual_methods - -def test_nonvirtual(): - A = Instance("A", ROOT) - addMethods(A, {"foo": meth(Meth([], Void))}) - - check_virtual_methods() - assert A._methods["foo"]._virtual == False - -def test_checkvirtual_simple(): - A = Instance("A", ROOT) - B = Instance("B", A) - - addMethods(A, {"foo": meth(Meth([], Void)), - "bar": meth(Meth([], Void))}) - - addMethods(B, {"foo": meth(Meth([], Void))}) - - check_virtual_methods() - assert A._methods["foo"]._virtual == True - assert A._methods["bar"]._virtual == False - assert B._methods["foo"]._virtual == False - -def test_checkvirtual_deep(): - A = Instance("A", ROOT) - B = Instance("B", A) - C = Instance("C", B) - - addMethods(A, {"foo": meth(Meth([], Void)), - "bar": meth(Meth([], Void))}) - - addMethods(C, {"foo": meth(Meth([], Void))}) - - check_virtual_methods() - assert A._methods["foo"]._virtual == True - assert A._methods["bar"]._virtual == False - assert "foo" not in B._methods - assert C._methods["foo"]._virtual == False - -def test_checkvirtual_brother(): - A = Instance("A", ROOT) - B1 = Instance("B1", A) - B2 = Instance("B2", A) - - addMethods(A, {"foo": meth(Meth([], Void)), - "bar": meth(Meth([], Void))}) - - addMethods(B1, {"foo": meth(Meth([], 
Void))}) - - check_virtual_methods() - assert A._methods["foo"]._virtual == True - assert A._methods["bar"]._virtual == False - assert B1._methods["foo"]._virtual == False - assert "foo" not in B2._methods - diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py --- a/rpython/translator/backendopt/test/test_inline.py +++ b/rpython/translator/backendopt/test/test_inline.py @@ -1,16 +1,11 @@ # XXX clean up these tests to use more uniform helpers import py -import os -from rpython.flowspace.model import Block, Link, Variable, Constant -from rpython.flowspace.model import last_exception, checkgraph +from rpython.flowspace.model import Variable, Constant, checkgraph from rpython.translator.backendopt import canraise -from rpython.translator.backendopt.inline import simple_inline_function, CannotInline -from rpython.translator.backendopt.inline import auto_inlining, Inliner -from rpython.translator.backendopt.inline import collect_called_graphs -from rpython.translator.backendopt.inline import measure_median_execution_cost -from rpython.translator.backendopt.inline import instrument_inline_candidates -from rpython.translator.backendopt.inline import auto_inline_graphs -from rpython.translator.backendopt.checkvirtual import check_virtual_methods +from rpython.translator.backendopt.inline import (simple_inline_function, + CannotInline, auto_inlining, Inliner, collect_called_graphs, + measure_median_execution_cost, instrument_inline_candidates, + auto_inline_graphs) from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.test.tool import BaseRtypingTest @@ -87,11 +82,8 @@ return eval_func def check_auto_inlining(self, func, sig, multiplier=None, call_count_check=False, - checkvirtual=False, remove_same_as=False, heuristic=None, - const_fold_first=False): + remove_same_as=False, heuristic=None, const_fold_first=False): t = self.translate(func, sig) - if checkvirtual: - check_virtual_methods() if const_fold_first: from rpython.translator.backendopt.constfold import constant_fold_graph from rpython.translator.simplify import eliminate_empty_blocks @@ -556,7 +548,7 @@ tot += item return tot - eval_func, t = self.check_auto_inlining(f, [], checkvirtual=True) + eval_func, t = self.check_auto_inlining(f, []) f_graph = graphof(t, f) called_graphs = collect_called_graphs(f_graph, t, include_oosend=False) assert len(called_graphs) == 0 From noreply at buildbot.pypy.org Tue Jul 23 18:21:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 18:21:18 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove ootype support from rpython.translator.backendopt.inline Message-ID: <20130723162118.1588B1C13EE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65561:e05eac0b3ff9 Date: 2013-07-23 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/e05eac0b3ff9/ Log: Remove ootype support from rpython.translator.backendopt.inline diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -3,7 +3,6 @@ from rpython.flowspace.model import (Variable, Constant, Block, Link, SpaceOperation, c_last_exception, FunctionGraph, mkentrymap) from rpython.rtyper.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr, normalizeptr -from rpython.rtyper.ootypesystem import ootype from rpython.tool.algo import 
sparsemat from rpython.translator.backendopt import removenoops from rpython.translator.backendopt.canraise import RaiseAnalyzer @@ -16,22 +15,12 @@ pass -def get_meth_from_oosend(op): - INSTANCE = op.args[1].concretetype - _, meth = INSTANCE._lookup(op.args[0].value) - virtual = getattr(meth, '_virtual', True) - if virtual: - return None - else: - return meth - - class CanRaise(object): def __init__(self, can_raise): self.can_raise = can_raise -def collect_called_graphs(graph, translator, include_oosend=True): +def collect_called_graphs(graph, translator): graphs_or_something = set() for block in graph.iterblocks(): for op in block.operations: @@ -48,15 +37,6 @@ else: for graph in graphs: graphs_or_something.add(graph) - if op.opname == 'oosend' and include_oosend: - meth = get_meth_from_oosend(op) - if hasattr(meth, 'graph'): - key = meth.graph - elif hasattr(meth, '_can_raise'): - key = CanRaise(meth._can_raise) - else: - key = op.args[0] - graphs_or_something.add(key) return graphs_or_something def iter_callsites(graph, calling_what): @@ -64,10 +44,6 @@ for i, op in enumerate(block.operations): if op.opname == "direct_call": funcobj = op.args[0].value._obj - elif op.opname == "oosend": - funcobj = get_meth_from_oosend(op) - if funcobj is None: - continue # cannot inline virtual methods else: continue @@ -119,21 +95,16 @@ while True: if isinstance(currvar, Constant): value = currvar.value - if isinstance(typeOf(value), ootype.Instance): - TYPE = ootype.dynamicType(value) - else: - TYPE = typeOf(normalizeptr(value)) + TYPE = typeOf(normalizeptr(value)) return TYPE, block.exits[0] if i < 0: return None, None op = ops[i] i -= 1 - if op.opname in ("same_as", "cast_pointer", "ooupcast", "oodowncast") and op.result is currvar: + if op.opname in ("same_as", "cast_pointer") and op.result is currvar: currvar = op.args[0] elif op.opname == "malloc" and op.result is currvar: return Ptr(op.args[0].value), block.exits[0] - elif op.opname == "new" and op.result is currvar: - return op.args[0].value, block.exits[0] def does_raise_directly(graph, raise_analyzer): """ this function checks, whether graph contains operations which can raise @@ -216,11 +187,8 @@ return count def get_graph_from_op(self, op): - assert op.opname in ('direct_call', 'oosend') - if op.opname == 'direct_call': - return self.op.args[0].value._obj.graph - else: - return get_meth_from_oosend(op).graph + assert op.opname == 'direct_call' + return self.op.args[0].value._obj.graph def inline_once(self, block, index_operation): self.varmap = {} @@ -247,10 +215,6 @@ for i, op in enumerate(block.operations): if op.opname == "direct_call": funcobj = op.args[0].value._obj - elif op.opname == "oosend": - funcobj = get_meth_from_oosend(op) - if funcobj is None: - continue else: continue graph = getattr(funcobj, 'graph', None) @@ -457,26 +421,6 @@ passon_args.append(linktoinlined.args[index]) passon_args += self.original_passon_vars - if self.op.opname == 'oosend' and not isinstance(self.op.args[1], Constant): - # if we try to inline a graph defined in a superclass, the - # type of 'self' on the graph differs from the current - linkv = passon_args[0] - inputv = copiedstartblock.inputargs[0] - LINK_SELF = linkv.concretetype - INPUT_SELF = inputv.concretetype - if LINK_SELF != INPUT_SELF: - # need to insert an upcast - if ootype.isSubclass(LINK_SELF, INPUT_SELF): - opname = 'ooupcast' - else: - assert ootype.isSubclass(INPUT_SELF, LINK_SELF) - opname = 'oodowncast' - v = Variable() - v.concretetype = INPUT_SELF - upcast = SpaceOperation(opname, 
[linkv], v) - block.operations.append(upcast) - passon_args[0] = v - #rewire blocks linktoinlined.target = copiedstartblock linktoinlined.args = passon_args @@ -544,8 +488,6 @@ total += 1.5 + len(op.args) / 2 elif op.opname == "indirect_call": total += 2 + len(op.args) / 2 - elif op.opname == "oosend": - total += 2 + len(op.args) / 2 total += weights.get(op.opname, 1) if block.exitswitch is not None: total += 1 @@ -629,11 +571,6 @@ '_dont_inline_', False): continue add(parentgraph, block, op, graph) - if op.opname == "oosend": - meth = get_meth_from_oosend(op) - graph = getattr(meth, 'graph', None) - if graph is not None and graph in ok_to_call: - add(parentgraph, block, op, graph) return result def instrument_inline_candidates(graphs, threshold): diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py --- a/rpython/translator/backendopt/test/test_inline.py +++ b/rpython/translator/backendopt/test/test_inline.py @@ -550,7 +550,7 @@ eval_func, t = self.check_auto_inlining(f, []) f_graph = graphof(t, f) - called_graphs = collect_called_graphs(f_graph, t, include_oosend=False) + called_graphs = collect_called_graphs(f_graph, t) assert len(called_graphs) == 0 result = eval_func([]) From noreply at buildbot.pypy.org Tue Jul 23 18:50:18 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 18:50:18 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Clean up rpython/translator/backendopt/ Message-ID: <20130723165018.93A191C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65562:feed2f2df76b Date: 2013-07-23 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/feed2f2df76b/ Log: Clean up rpython/translator/backendopt/ diff --git a/rpython/translator/backendopt/malloc.py b/rpython/translator/backendopt/malloc.py --- a/rpython/translator/backendopt/malloc.py +++ b/rpython/translator/backendopt/malloc.py @@ -1,7 +1,6 @@ from rpython.flowspace.model import Variable, Constant, SpaceOperation from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype from rpython.translator import simplify from rpython.translator.backendopt import removenoops from rpython.translator.backendopt.support import log @@ -538,10 +537,7 @@ def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True): - if type_system == 'lltypesystem': - remover = LLTypeMallocRemover(verbose) - else: - remover = OOTypeMallocRemover(verbose) + remover = LLTypeMallocRemover(verbose) return remover.remove_simple_mallocs(graph) diff --git a/rpython/translator/backendopt/test/test_mallocv.py b/rpython/translator/backendopt/test/test_mallocv.py --- a/rpython/translator/backendopt/test/test_mallocv.py +++ b/rpython/translator/backendopt/test/test_mallocv.py @@ -1,17 +1,11 @@ import py import sys from rpython.translator.backendopt.mallocv import MallocVirtualizer -from rpython.translator.backendopt.inline import inline_function -from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.translator import TranslationContext, graphof -from rpython.translator import simplify -from rpython.flowspace.model import checkgraph, Block, mkentrymap from rpython.flowspace.model import summary from rpython.rtyper.llinterp import LLInterpreter, LLException -from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation -from rpython.rtyper.ootypesystem import ootype +from rpython.rtyper.lltypesystem import lltype, 
lloperation from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib import objectmodel from rpython.rlib.rarithmetic import ovfcheck from rpython.conftest import option @@ -22,13 +16,8 @@ self.excname = excname -class BaseMallocRemovalTest(object): - type_system = None - MallocRemover = None - - def _skip_oo(self, msg): - if self.type_system == 'ootype': - py.test.skip(msg) +class TestMallocRemoval(object): + type_system = 'lltype' def check_malloc_removed(cls, graph, expected_mallocs, expected_calls): count_mallocs = 0 @@ -59,7 +48,6 @@ mallocv = MallocVirtualizer(t.graphs, t.rtyper, verbose=True) while True: progress = mallocv.remove_mallocs_once() - #simplify.transform_dead_op_vars_in_blocks(list(graph.iterblocks())) if progress and option.view: t.view() t.checkgraphs() @@ -552,10 +540,6 @@ # g(Virtual, Virtual) -class TestLLTypeMallocRemoval(BaseMallocRemovalTest): - type_system = 'lltype' - #MallocRemover = LLTypeMallocRemover - def test_getsubstruct(self): SMALL = lltype.Struct('SMALL', ('x', lltype.Signed)) BIG = lltype.GcStruct('BIG', ('z', lltype.Signed), ('s', SMALL)) @@ -802,32 +786,3 @@ t.y += 1 return s.x graph = self.check(f, [int], [5], 123) - - -class DISABLED_TestOOTypeMallocRemoval(BaseMallocRemovalTest): - type_system = 'ootype' - #MallocRemover = OOTypeMallocRemover - - def test_oononnull(self): - FOO = ootype.Instance('Foo', ootype.ROOT) - def fn(): - s = ootype.new(FOO) - return bool(s) - self.check(fn, [], [], True) - - def test_classattr_as_defaults(self): - class Bar: - foo = 41 - - def fn(): - x = Bar() - x.foo += 1 - return x.foo - self.check(fn, [], [], 42) - - def test_fn5(self): - # don't test this in ootype because the class attribute access - # is turned into an oosend which prevents malloc removal to - # work unless we inline first. 
See test_classattr in - # test_inline.py - pass diff --git a/rpython/translator/backendopt/test/test_storesink.py b/rpython/translator/backendopt/test/test_storesink.py --- a/rpython/translator/backendopt/test/test_storesink.py +++ b/rpython/translator/backendopt/test/test_storesink.py @@ -3,13 +3,12 @@ from rpython.translator.translator import TranslationContext, graphof from rpython.translator.backendopt.storesink import storesink_graph from rpython.translator.backendopt import removenoops -from rpython.flowspace.model import last_exception, checkgraph +from rpython.flowspace.model import checkgraph from rpython.conftest import option class TestStoreSink(object): - # not sure if it makes any sense on ootype, maybe worth trying type_system = 'lltype' - + def translate(self, func, argtypes): t = TranslationContext() t.buildannotator().build_types(func, argtypes) @@ -37,7 +36,7 @@ def test_infrastructure(self): class A(object): pass - + def f(i): a = A() a.x = i @@ -55,7 +54,7 @@ return a.x + a.x self.check(f, [int], 1) - + def test_irrelevant_setfield(self): class A(object): pass diff --git a/rpython/translator/backendopt/writeanalyze.py b/rpython/translator/backendopt/writeanalyze.py --- a/rpython/translator/backendopt/writeanalyze.py +++ b/rpython/translator/backendopt/writeanalyze.py @@ -1,6 +1,5 @@ from rpython.flowspace.model import Variable from rpython.translator.backendopt import graphanalyze -from rpython.rtyper.ootypesystem import ootype top_set = object() empty_set = frozenset() @@ -36,7 +35,7 @@ return result1.union(result2) def analyze_simple_operation(self, op, graphinfo): - if op.opname in ("setfield", "oosetfield"): + if op.opname == "setfield": if graphinfo is None or not graphinfo.is_fresh_malloc(op.args[0]): return frozenset([ ("struct", op.args[0].concretetype, op.args[1].value)]) @@ -48,15 +47,6 @@ def _array_result(self, TYPE): return frozenset([("array", TYPE)]) - def analyze_external_method(self, op, TYPE, meth): - if isinstance(TYPE, ootype.Array): - methname = op.args[0].value - if methname == 'll_setitem_fast': - return self._array_result(op.args[1].concretetype) - elif methname in ('ll_getitem_fast', 'll_length'): - return self.bottom_result() - return graphanalyze.GraphAnalyzer.analyze_external_method(self, op, TYPE, meth) - def compute_graph_info(self, graph): return FreshMallocs(graph) From noreply at buildbot.pypy.org Tue Jul 23 19:15:36 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 19:15:36 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: clean up in rpython/rlib/ Message-ID: <20130723171536.A6B081C14B6@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65563:8f69c5c10b48 Date: 2013-07-23 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/8f69c5c10b48/ Log: clean up in rpython/rlib/ diff --git a/rpython/rlib/_jit_vref.py b/rpython/rlib/_jit_vref.py --- a/rpython/rlib/_jit_vref.py +++ b/rpython/rlib/_jit_vref.py @@ -6,8 +6,6 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.error import TyperError -from rpython.rtyper.ootypesystem import ootype - class SomeVRef(annmodel.SomeObject): @@ -28,10 +26,7 @@ return annmodel.s_Bool def rtyper_makerepr(self, rtyper): - if rtyper.type_system.name == 'lltypesystem': - return vrefrepr - elif rtyper.type_system.name == 'ootypesystem': - return oovrefrepr + return vrefrepr def rtyper_makekey(self): return self.__class__, @@ -70,21 +65,4 @@ hop.exception_cannot_occur() return hop.genop('jit_is_virtual', [v], resulttype = lltype.Bool) -from 
rpython.rtyper.ootypesystem.rclass import OBJECT - -class OOVRefRepr(VRefRepr): - lowleveltype = OBJECT - def rtype_simple_call(self, hop): - [v] = hop.inputargs(self) - hop.exception_is_here() - v = hop.genop('jit_force_virtual', [v], resulttype = OBJECT) - return hop.genop('oodowncast', [v], resulttype = hop.r_result) - - def convert_const(self, value): - if value() is not None: - raise TypeError("only supports virtual_ref_None as a" - " prebuilt virtual_ref") - return ootype.ROOT._null - vrefrepr = VRefRepr() -oovrefrepr = OOVRefRepr() diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -758,12 +758,6 @@ args_s.append(s_arg) bk.emulate_pbc_call(uniquekey, s_func, args_s) - def get_getfield_op(self, rtyper): - if rtyper.type_system.name == 'ootypesystem': - return 'oogetfield' - else: - return 'getfield' - def specialize_call(self, hop, **kwds_i): # XXX to be complete, this could also check that the concretetype # of the variables are the same for each of the calls. @@ -792,10 +786,7 @@ "field %r not found in %r" % (name, r_red.lowleveltype.TO)) r_red = r_red.rbase - if hop.rtyper.type_system.name == 'ootypesystem': - GTYPE = r_red.lowleveltype - else: - GTYPE = r_red.lowleveltype.TO + GTYPE = r_red.lowleveltype.TO assert GTYPE._immutable_field(mangled_name), ( "field %r must be declared as immutable" % name) if not hasattr(driver, 'll_greenfields'): @@ -804,8 +795,7 @@ # v_red = hop.inputarg(r_red, arg=i) c_llname = hop.inputconst(lltype.Void, mangled_name) - getfield_op = self.get_getfield_op(hop.rtyper) - v_green = hop.genop(getfield_op, [v_red, c_llname], + v_green = hop.genop('getfield', [v_red, c_llname], resulttype=r_field) s_green = s_red.classdef.about_attribute(fieldname) assert s_green is not None diff --git a/rpython/rlib/rerased.py b/rpython/rlib/rerased.py --- a/rpython/rlib/rerased.py +++ b/rpython/rlib/rerased.py @@ -181,10 +181,7 @@ return False # cannot be None, but can contain a None def rtyper_makerepr(self, rtyper): - if rtyper.type_system.name == 'lltypesystem': - return ErasedRepr(rtyper) - elif rtyper.type_system.name == 'ootypesystem': - return OOErasedRepr(rtyper) + return ErasedRepr(rtyper) def rtyper_makekey(self): return self.__class__, @@ -242,51 +239,3 @@ return lltype.nullptr(self.lowleveltype.TO) v = r_obj.convert_const(value._x) return lltype.cast_opaque_ptr(self.lowleveltype, v) - -from rpython.rtyper.ootypesystem import ootype - -class OOErasedRepr(Repr): - lowleveltype = ootype.Object - def __init__(self, rtyper): - self.rtyper = rtyper - - def rtype_erase(self, hop, s_obj): - hop.exception_cannot_occur() - r_obj = self.rtyper.getrepr(s_obj) - if r_obj.lowleveltype is lltype.Void: - return hop.inputconst(self.lowleveltype, - ootype.NULL) - [v_obj] = hop.inputargs(r_obj) - return hop.genop('cast_to_object', [v_obj], - resulttype=self.lowleveltype) - - def rtype_unerase(self, hop, s_obj): - [v] = hop.inputargs(hop.args_r[0]) - return hop.genop('cast_from_object', [v], resulttype=hop.r_result) - - def rtype_unerase_int(self, hop, v): - c_one = hop.inputconst(lltype.Signed, 1) - hop.exception_cannot_occur() - v2 = hop.genop('oounbox_int', [v], resulttype=hop.r_result) - return hop.genop('int_rshift', [v2, c_one], resulttype=lltype.Signed) - - def rtype_erase_int(self, hop): - [v_value] = hop.inputargs(lltype.Signed) - c_one = hop.inputconst(lltype.Signed, 1) - hop.exception_is_here() - v2 = hop.genop('int_add_ovf', [v_value, v_value], - resulttype = lltype.Signed) - v2p1 = hop.genop('int_add', [v2, 
c_one], - resulttype = lltype.Signed) - return hop.genop('oobox_int', [v2p1], resulttype=hop.r_result) - - def convert_const(self, value): - if value._identity is _identity_for_ints: - return value._x # FIXME: what should we do here? - bk = self.rtyper.annotator.bookkeeper - s_obj = value._identity.get_input_annotation(bk) - r_obj = self.rtyper.getrepr(s_obj) - if r_obj.lowleveltype is lltype.Void: - return ootype.NULL - v = r_obj.convert_const(value._x) - return ootype.cast_to_object(v) diff --git a/rpython/rlib/test/test__jit_vref.py b/rpython/rlib/test/test__jit_vref.py --- a/rpython/rlib/test/test__jit_vref.py +++ b/rpython/rlib/test/test__jit_vref.py @@ -4,14 +4,11 @@ from rpython.rlib._jit_vref import SomeVRef from rpython.annotator import model as annmodel from rpython.annotator.annrpython import RPythonAnnotator -from rpython.rtyper.test.test_llinterp import interpret from rpython.rtyper.lltypesystem.rclass import OBJECTPTR -from rpython.rtyper.ootypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.test.tool import BaseRtypingTest -from rpython.rtyper.ootypesystem import ootype class X(object): pass diff --git a/rpython/rlib/test/test_rerased.py b/rpython/rlib/test/test_rerased.py --- a/rpython/rlib/test/test_rerased.py +++ b/rpython/rlib/test/test_rerased.py @@ -5,9 +5,7 @@ from rpython.rlib.rerased import * from rpython.annotator import model as annmodel from rpython.annotator.annrpython import RPythonAnnotator -from rpython.rtyper.test.test_llinterp import interpret from rpython.rtyper.lltypesystem.rclass import OBJECTPTR -from rpython.rtyper.ootypesystem.rclass import OBJECT from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.test.tool import BaseRtypingTest diff --git a/rpython/rlib/test/test_rstackovf.py b/rpython/rlib/test/test_rstackovf.py --- a/rpython/rlib/test/test_rstackovf.py +++ b/rpython/rlib/test/test_rstackovf.py @@ -40,11 +40,6 @@ res = interpret(f, [sys.maxint]) assert res == 1 -def test_oointerp(): - from rpython.rtyper.test.test_llinterp import interpret - res = interpret(f, [sys.maxint], type_system='ootype') - assert res == 1 - def test_c_translation(): from rpython.translator.c.test.test_genc import compile fn = compile(f, [int]) From noreply at buildbot.pypy.org Tue Jul 23 19:47:02 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 23 Jul 2013 19:47:02 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill ootype LLOps Message-ID: <20130723174702.2E1FA1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65564:76485b3cc47f Date: 2013-07-23 19:46 +0200 http://bitbucket.org/pypy/pypy/changeset/76485b3cc47f/ Log: Kill ootype LLOps diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -14,7 +14,6 @@ from rpython.rlib.rarithmetic import (ovfcheck, is_valid_int, intmask, r_uint, r_longlong, r_ulonglong, r_longlonglong) from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation, llheap, rclass -from rpython.rtyper.ootypesystem import ootype log = py.log.Producer('llinterp') @@ -43,11 +42,7 @@ return ': '.join([str(x) for x in self.args]) def type_name(etype): - if isinstance(lltype.typeOf(etype), lltype.Ptr): - return ''.join(etype.name).rstrip('\x00') - else: - # ootype! - return etype._INSTANCE._name.split(".")[-1] + return ''.join(etype.name).rstrip('\x00') class LLInterpreter(object): """ low level interpreter working with concrete values. 
""" @@ -150,12 +145,8 @@ assert isinstance(exc, LLException) klass, inst = exc.args[0], exc.args[1] for cls in enumerate_exceptions_top_down(): - if hasattr(klass, 'name'): # lltype - if "".join(klass.name).rstrip("\0") == cls.__name__: - return cls - else: # ootype - if klass._INSTANCE._name.split('.')[-1] == cls.__name__: - return cls + if "".join(klass.name).rstrip("\0") == cls.__name__: + return cls raise ValueError("couldn't match exception, maybe it" " has RPython attributes like OSError?") @@ -178,12 +169,6 @@ def checkadr(addr): assert lltype.typeOf(addr) is llmemory.Address -def is_inst(inst): - return isinstance(lltype.typeOf(inst), (ootype.Instance, ootype.BuiltinType, ootype.StaticMethod)) - -def checkinst(inst): - assert is_inst(inst) - class LLFrame(object): def __init__(self, graph, args, llinterpreter): @@ -863,8 +848,6 @@ PTR = lltype.typeOf(ptr) if isinstance(PTR, lltype.Ptr): return self.heap.gc_id(ptr) - elif isinstance(PTR, ootype.OOType): - return ootype.identityhash(ptr) # XXX imprecise raise NotImplementedError("gc_id on %r" % (PTR,)) def op_gc_set_max_heap_size(self, maxsize): @@ -1117,84 +1100,6 @@ exc_data.exc_value = lltype.typeOf(evalue)._defl() return bool(etype) - #Operation of ootype - - def op_new(self, INST): - assert isinstance(INST, (ootype.Instance, ootype.BuiltinType)) - return ootype.new(INST) - - def op_oonewarray(self, ARRAY, length): - assert isinstance(ARRAY, ootype.Array) - assert is_valid_int(length) - return ootype.oonewarray(ARRAY, length) - - def op_runtimenew(self, class_): - return ootype.runtimenew(class_) - - def op_oonewcustomdict(self, DICT, eq_func, eq_obj, eq_method_name, - hash_func, hash_obj, hash_method_name): - eq_name, interp_eq = \ - wrap_callable(self.llinterpreter, eq_func, eq_obj, eq_method_name) - EQ_FUNC = ootype.StaticMethod([DICT._KEYTYPE, DICT._KEYTYPE], ootype.Bool) - sm_eq = ootype.static_meth(EQ_FUNC, eq_name, _callable=interp_eq) - - hash_name, interp_hash = \ - wrap_callable(self.llinterpreter, hash_func, hash_obj, hash_method_name) - HASH_FUNC = ootype.StaticMethod([DICT._KEYTYPE], ootype.Signed) - sm_hash = ootype.static_meth(HASH_FUNC, hash_name, _callable=interp_hash) - - # XXX: is it fine to have StaticMethod type for bound methods, too? 
- return ootype.oonewcustomdict(DICT, sm_eq, sm_hash) - - def op_oosetfield(self, inst, name, value): - checkinst(inst) - assert isinstance(name, str) - FIELDTYPE = lltype.typeOf(inst)._field_type(name) - if FIELDTYPE is not lltype.Void: - setattr(inst, name, value) - - def op_oogetfield(self, inst, name): - checkinst(inst) - assert isinstance(name, str) - return getattr(inst, name) - - def op_oosend(self, message, inst, *args): - checkinst(inst) - assert isinstance(message, str) - bm = getattr(inst, message) - inst = bm.inst - m = bm.meth - args = m._checkargs(args, check_callable=False) - if getattr(m, 'abstract', False): - raise RuntimeError("calling abstract method %r" % (m,)) - return self.perform_call(m, (lltype.typeOf(inst),)+lltype.typeOf(m).ARGS, [inst]+args) - - def op_oostring(self, obj, base): - return ootype.oostring(obj, base) - - def op_oounicode(self, obj, base): - try: - return ootype.oounicode(obj, base) - except UnicodeDecodeError: - self.make_llexception() - - def op_ooparse_int(self, s, base): - try: - return ootype.ooparse_int(s, base) - except ValueError: - self.make_llexception() - - def op_ooparse_float(self, s): - try: - return ootype.ooparse_float(s) - except ValueError: - self.make_llexception() - - def op_oobox_int(self, i): - return ootype.oobox_int(i) - - def op_oounbox_int(self, x): - return ootype.oounbox_int(x) class Tracer(object): Counter = 0 diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -8,8 +8,7 @@ class LLOp(object): def __init__(self, sideeffects=True, canfold=False, canraise=(), - canmallocgc=False, canrun=False, oo=False, - tryfold=False): + canmallocgc=False, canrun=False, tryfold=False): # self.opname = ... (set afterwards) if canfold: @@ -42,9 +41,6 @@ # The operation can be run directly with __call__ self.canrun = canrun or canfold - # The operation belongs to the ootypesystem - self.oo = oo - # __________ make the LLOp instances callable from LL helpers __________ __name__ = property(lambda self: 'llop_'+self.opname) @@ -68,10 +64,7 @@ global lltype # <- lazy import hack, worth an XXX from rpython.rtyper.lltypesystem import lltype if self.canrun: - if self.oo: - from rpython.rtyper.ootypesystem.ooopimpl import get_op_impl - else: - from rpython.rtyper.lltypesystem.opimpl import get_op_impl + from rpython.rtyper.lltypesystem.opimpl import get_op_impl op_impl = get_op_impl(self.opname) else: error = TypeError("cannot constant-fold operation %r" % ( @@ -87,14 +80,10 @@ return True if self is llop.debug_assert: # debug_assert is pure enough return True - # reading from immutable (lltype) + # reading from immutable if self is llop.getfield or self is llop.getarrayitem: field = getattr(args_v[1], 'value', None) return args_v[0].concretetype.TO._immutable_field(field) - # reading from immutable (ootype) (xxx what about arrays?) 
- if self is llop.oogetfield: - field = getattr(args_v[1], 'value', None) - return args_v[0].concretetype._immutable_field(field) # default return False @@ -350,7 +339,7 @@ 'lllong_lshift': LLOp(canfold=True), # args (r_longlonglong, int) 'lllong_rshift': LLOp(canfold=True), # args (r_longlonglong, int) 'lllong_xor': LLOp(canfold=True), - + 'cast_primitive': LLOp(canfold=True), 'cast_bool_to_int': LLOp(canfold=True), 'cast_bool_to_uint': LLOp(canfold=True), @@ -573,32 +562,6 @@ # __________ instrumentation _________ 'instrument_count': LLOp(), - - # __________ ootype operations __________ - 'new': LLOp(oo=True, canraise=(MemoryError,)), - 'runtimenew': LLOp(oo=True, canraise=(MemoryError,)), - 'oonewcustomdict': LLOp(oo=True, canraise=(MemoryError,)), - 'oonewarray': LLOp(oo=True, canraise=(MemoryError,)), - 'oosetfield': LLOp(oo=True), - 'oogetfield': LLOp(oo=True, sideeffects=False, canrun=True), - 'oosend': LLOp(oo=True, canraise=(Exception,)), - 'ooupcast': LLOp(oo=True, canfold=True), - 'oodowncast': LLOp(oo=True, canfold=True), - 'cast_to_object': LLOp(oo=True, canfold=True), - 'cast_from_object': LLOp(oo=True, canfold=True), - 'oononnull': LLOp(oo=True, canfold=True), - 'ooisnot': LLOp(oo=True, canfold=True), - 'ooisnull': LLOp(oo=True, canfold=True), - 'oois': LLOp(oo=True, canfold=True), - 'instanceof': LLOp(oo=True, canfold=True), - 'classof': LLOp(oo=True, canfold=True), - 'subclassof': LLOp(oo=True, canfold=True), - 'oostring': LLOp(oo=True, sideeffects=False), - 'oobox_int': LLOp(oo=True, sideeffects=False), - 'oounbox_int': LLOp(oo=True, sideeffects=False), - 'ooparse_int': LLOp(oo=True, canraise=(ValueError,)), - 'ooparse_float': LLOp(oo=True, canraise=(ValueError,)), - 'oounicode': LLOp(oo=True, canraise=(UnicodeDecodeError,)), } # ***** Run test_lloperation after changes. 
***** diff --git a/rpython/rtyper/lltypesystem/test/test_lloperation.py b/rpython/rtyper/lltypesystem/test/test_lloperation.py --- a/rpython/rtyper/lltypesystem/test/test_lloperation.py +++ b/rpython/rtyper/lltypesystem/test/test_lloperation.py @@ -1,7 +1,6 @@ import py from rpython.rtyper.lltypesystem.lloperation import LL_OPERATIONS, llop, void from rpython.rtyper.lltypesystem import lltype, opimpl -from rpython.rtyper.ootypesystem import ootype, ooopimpl from rpython.rtyper.llinterp import LLFrame from rpython.rtyper.test.test_llinterp import interpret from rpython.rtyper import rclass @@ -16,10 +15,7 @@ for opname, llop in LL_OPERATIONS.items(): assert opname == llop.opname if llop.canfold: - if llop.oo: - func = ooopimpl.get_op_impl(opname) - else: - func = opimpl.get_op_impl(opname) + func = opimpl.get_op_impl(opname) assert callable(func) def test_llop_fold(): @@ -48,14 +44,13 @@ def llf(): s = lltype.malloc(S) llop.bare_setfield(lltype.Void, s, void('x'), 3) - llop.bare_setfield(lltype.Void, s, name_y, 2) + llop.bare_setfield(lltype.Void, s, name_y, 2) return s.x + s.y res = interpret(llf, [], policy=LowLevelAnnotatorPolicy()) assert res == 5 def test_is_pure(): from rpython.flowspace.model import Variable, Constant - from rpython.rtyper import rclass assert llop.bool_not.is_pure([Variable()]) assert llop.debug_assert.is_pure([Variable()]) assert not llop.int_add_ovf.is_pure([Variable(), Variable()]) From noreply at buildbot.pypy.org Tue Jul 23 20:04:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 23 Jul 2013 20:04:56 +0200 (CEST) Subject: [pypy-commit] pypy add-statvfs: PyPy level statvfs and fstatvfs seem to work Message-ID: <20130723180456.822ED1C0149@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: add-statvfs Changeset: r65565:b0c0ac749b6b Date: 2013-07-23 09:59 -0700 http://bitbucket.org/pypy/pypy/changeset/b0c0ac749b6b/ Log: PyPy level statvfs and fstatvfs seem to work diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,21 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'statvfs_result': 'app_posix.statvfs_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' : 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 
'app_posix.wait' @@ -54,44 +55,49 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'fstatvfs': 'interp_posix.fstatvfs', + 'statvfs': 'interp_posix.statvfs', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -168,8 +174,8 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) @@ -178,7 +184,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if 
hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -187,7 +193,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -195,7 +201,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstat(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -154,6 +154,15 @@ else: return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def statvfs(path): + if isinstance(path, str): + return os.statvfs(path) + else: + return os.statvfs(path.as_bytes()) + + @specialize.argtype(0) def unlink(path): if isinstance(path, str): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,6 +1698,12 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) + @registering(os.fstatvfs) + def register_os_fstatvfs(self): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + + # ------------------------------- os.W* --------------------------------- w_star = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -2,20 +2,22 @@ and os.fstat(). In RPython like in plain Python the stat result can be indexed like a tuple but also exposes the st_xxx attributes. """ -import os, sys + +import os +import sys + from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_with_new_name, func_renamer -from rpython.rtyper import extregistry -from rpython.rtyper.extfunc import register_external, extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize +from rpython.rtyper import extregistry +from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.extfunc import extdef +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.tool import rffi_platform as platform +from rpython.tool.pairtype import pairtype +from rpython.tool.sourcetools import func_renamer from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.annlowlevel import hlstr # Support for float times is here. 
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve @@ -47,12 +49,26 @@ ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented - ] +] N_INDEXABLE_FIELDS = 10 # For OO backends, expose only the portable fields (the first 10). PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + # ____________________________________________________________ # # Annotation support @@ -79,6 +95,7 @@ def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7], st[8], st[9]) + def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) @@ -86,6 +103,23 @@ extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate + +class SomeStatvfsResult(annmodel.SomeObject): + knowntype = os.statvfs_result + + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.module import r_os_stat + return r_os_stat.StatvfsResultRepr(rtyper) + + def rtyper_makekey_ex(self, rtyper): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return annmodel.lltype_to_annotation(TYPE) + + class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" @@ -94,8 +128,17 @@ name, TYPE = STAT_FIELDS[index] return annmodel.lltype_to_annotation(TYPE) + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return annmodel.lltype_to_annotation(TYPE) + + s_StatResult = SomeStatResult() + def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" positional = tup[:N_INDEXABLE_FIELDS] @@ -104,6 +147,11 @@ kwds[name] = tup[N_INDEXABLE_FIELDS + i] return os.stat_result(positional, kwds) + +def make_statvfs_result(tup): + return os.statvfs_result(tup) + + class MakeStatResultEntry(extregistry.ExtRegistryEntry): _about_ = make_stat_result @@ -120,16 +168,16 @@ if sys.platform.startswith('win'): _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'unistd.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( # This must be set to 64 on some systems to enable large file support. #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], # ^^^ nowadays it's always set in all C files we produce. 
- includes = INCLUDES + includes=INCLUDES ) if TIMESPEC is not None: @@ -141,7 +189,7 @@ def posix_declaration(try_to_add=None): - global STAT_STRUCT + global STAT_STRUCT, STATVFS_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] if try_to_add: @@ -173,15 +221,17 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + try: - config = platform.configure(CConfig, ignore_errors= - try_to_add is not None) + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) except platform.CompilationError: if try_to_add: return # failed to add this field, give up raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) if try_to_add: STAT_FIELDS.append(try_to_add) @@ -202,6 +252,9 @@ STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] del _name, _TYPE +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + def build_stat_result(st): # only for LL backends @@ -233,6 +286,21 @@ return make_stat_result(result) +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + def register_stat_variant(name, traits): if name != 'fstat': arg_is_path = True @@ -301,6 +369,61 @@ [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) + +def register_statvfs_variant(name, traits): + if name != 'fstatvfs': + arg_is_path = True + s_arg = traits.str0 + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + posix_mystatvfs = rffi.llexternal(name, + [ARG1, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info + ) + + @func_renamer('os_%s_llimpl' % (name,)) + def posix_statvfs_llimpl(arg): + stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') + try: + if arg_is_path: + arg = traits.str2charp(arg) + error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) + if arg_is_path: + traits.free_charp(arg) + if error != 0: + raise OSError(rposix.get_errno(), "os_?statvfs failed") + return build_statvfs_result(stresult) + finally: + lltype.free(stresult, flavor='raw') + + @func_renamer('os_%s_fake' % (name,)) + def posix_fakeimpl(arg): + if s_arg == traits.str0: + arg = hlstr(arg) + st = getattr(os, name)(arg) + fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] + TP = TUPLE_TYPE(fields) + ll_tup = lltype.malloc(TP.TO) + for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): + val = getattr(st, fieldname) + if isinstance(TYPE, lltype.Number): + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) + elif TYPE is lltype.Float: + setattr(ll_tup, 'item%d' % i, float(val)) + else: + setattr(ll_tup, 'item%d' % i, val) + return ll_tup + + return extdef( + [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl + ) + + def make_win32_stat_impl(name, traits): from rpython.rlib import rwin32 from rpython.rtyper.module.ll_win32file import make_win32_traits From noreply at buildbot.pypy.org Tue Jul 23 20:04:59 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 23 Jul 2013 20:04:59 +0200 (CEST) Subject: [pypy-commit] pypy add-statvfs: Translates! 
Message-ID: <20130723180459.1620C1C0149@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: add-statvfs Changeset: r65566:daa8f7d9b0c2 Date: 2013-07-23 11:04 -0700 http://bitbucket.org/pypy/pypy/changeset/daa8f7d9b0c2/ Log: Translates! diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -331,7 +331,7 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): try: - st = os.fstat(fd) + st = os.fstatvfs(fd) except OSError as e: raise wrap_oserror(space, e) else: diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1703,6 +1703,11 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) + # ------------------------------- os.W* --------------------------------- diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -137,6 +137,7 @@ s_StatResult = SomeStatResult() +s_StatvfsResult = SomeStatvfsResult() def make_stat_result(tup): @@ -162,6 +163,17 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.specialize_make_stat_result(hop) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + from rpython.rtyper.module import r_os_stat + return r_os_stat.specialize_make_statvfs_result(hop) + # ____________________________________________________________ # # RFFI support @@ -410,16 +422,11 @@ ll_tup = lltype.malloc(TP.TO) for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): val = getattr(st, fieldname) - if isinstance(TYPE, lltype.Number): - rffi.setintfield(ll_tup, 'item%d' % i, int(val)) - elif TYPE is lltype.Float: - setattr(ll_tup, 'item%d' % i, float(val)) - else: - setattr(ll_tup, 'item%d' % i, val) + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) return ll_tup return extdef( - [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), + [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl ) diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -67,3 +67,52 @@ # no-op conversion from r_StatResult.r_tuple to r_StatResult hop.exception_cannot_occur() return v_result + + +class StatvfsResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_fields = ll_os_stat.STATVFS_FIELDS + + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(self.statvfs_fields): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + for name, TYPE in self.statvfs_fields]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + hop2.forced_opname = 'getitem' + hop2.args_v = [hop2.args_v[0], Constant(index)] + hop2.args_s = [self.s_tuple, 
s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def specialize_make_statvfs_result(hop): + r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result From noreply at buildbot.pypy.org Wed Jul 24 10:47:14 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 10:47:14 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: cargo-cult until the rtyping passes - I have no clue what I'm doing Message-ID: <20130724084714.DA2FD1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65592:36e01240d91f Date: 2013-07-24 10:46 +0200 http://bitbucket.org/pypy/pypy/changeset/36e01240d91f/ Log: cargo-cult until the rtyping passes - I have no clue what I'm doing diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -332,7 +332,7 @@ key = key + key1 return funcdesc.cachedgraph(key, builder=builder) -def _get_key(args_s, argindices): +def specialize_argvalue(funcdesc, args_s, *argindices): from rpython.annotator.model import SomePBC key = [] for i in argindices: @@ -347,9 +347,6 @@ raise Exception("specialize:arg(%d): argument not constant: %r" % (i, s)) key = tuple(key) - -def specialize_argvalue(funcdesc, args_s, *argindices): - key = _get_key(args_s, argindices) return maybe_star_args(funcdesc, key, args_s) def specialize_arg_or_var(funcdesc, args_s, *argindices): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1022,7 +1022,8 @@ from rpython.rtyper.lltypesystem import lltype args_v = hop.inputargs(lltype.Bool, lltype.Void, *hop.args_r[2:]) - args_v[1] = hop.args_r[1].get_unique_llfn() + args_v[1] = hop.args_r[1].get_concrete_llfn(hop.args_s[1], + hop.args_s[2:], hop.spaceop) hop.exception_is_here() return hop.genop('jit_conditional_call', args_v) diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -302,6 +302,18 @@ llfn = self.rtyper.getcallable(graph) return inputconst(typeOf(llfn), llfn) + def get_concrete_llfn(self, s_pbc, args_s, op): + bk = self.rtyper.annotator.bookkeeper + descs = list(s_pbc.descriptions) + vfcs = description.FunctionDesc.variant_for_call_site + args = bk.build_args("simple_call", args_s) + shape, index = vfcs(bk, self.callfamily, descs, args, op) + funcdesc, = descs + row_of_one_graph = self.callfamily.calltables[shape][index] + graph = row_of_one_graph[funcdesc] + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + def rtype_simple_call(self, hop): return self.call('simple_call', hop) From noreply at buildbot.pypy.org Wed Jul 24 10:49:41 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 10:49:41 +0200 (CEST) Subject: [pypy-commit] pypy default: at the very least don't break importing rpython on top of pypy Message-ID: 
<20130724084941.D094B1C14BB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65593:562d2c9195d4 Date: 2013-07-24 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/562d2c9195d4/ Log: at the very least don't break importing rpython on top of pypy diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,15 +1698,16 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) - @registering(os.fstatvfs) + @registering_if(os, 'fstatvfs') def register_os_fstatvfs(self): from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) - @registering_str_unicode(os.statvfs) - def register_os_statvfs(self, traits): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_statvfs_variant('statvfs', traits) + if hasattr(os, 'statvfs'): + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) # ------------------------------- os.W* --------------------------------- From noreply at buildbot.pypy.org Wed Jul 24 11:15:25 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 11:15:25 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Make bytearray tests pass. Message-ID: <20130724091525.631631C14B6@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65594:6689c40a01fe Date: 2013-07-24 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/6689c40a01fe/ Log: Make bytearray tests pass. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature from pypy.objspace.std import bytesobject from pypy.objspace.std.intobject import W_IntObject @@ -11,7 +11,6 @@ from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.register_all import register_all from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice from pypy.objspace.std.stdtypedef import StdTypeDef, SMM from pypy.objspace.std.stringmethods import StringMethods @@ -36,10 +35,235 @@ return len(self.data) def _val(self, space): - return self.data + return space.bufferstr_w(self) - def _op_val(self, w_other): - return w_other.data + def _op_val(self, space, w_other): + return space.bufferstr_new_w(w_other) + + def _chr(self, char): + return str(char) + + _builder = StringBuilder + + def _newlist_unwrapped(self, space, res): + return space.wrap([W_BytearrayObject(list(i)) for i in res]) + + def _isupper(self, ch): + return ch.isupper() + + def _islower(self, ch): + return ch.islower() + + def _istitle(self, ch): + return ch.istitle() + + def _isspace(self, ch): + return ch.isspace() + + def _isalpha(self, ch): + return ch.isalpha() + + def _isalnum(self, ch): + return ch.isalnum() 
+ + def _isdigit(self, ch): + return ch.isdigit() + + _iscased = _isalpha + + def _upper(self, ch): + if ch.islower(): + o = ord(ch) - 32 + return chr(o) + else: + return ch + + def _lower(self, ch): + if ch.isupper(): + o = ord(ch) + 32 + return chr(o) + else: + return ch + + def _join_return_one(self, space, w_obj): + return space.is_w(space.type(w_obj), space.w_unicode) + + def _join_check_item(self, space, w_obj): + if (space.is_w(space.type(w_obj), space.w_str) or + space.is_w(space.type(w_obj), space.w_bytearray)): + return 0 + return 1 + + def ord(self, space): + if len(self.data) != 1: + msg = "ord() expected a character, but string of length %d found" + raise operationerrfmt(space.w_TypeError, msg, len(self.data)) + return space.wrap(ord(self.data[0])) + + def descr_init(self, space, __args__): + # this is on the silly side + w_source, w_encoding, w_errors = __args__.parse_obj( + None, 'bytearray', init_signature, init_defaults) + + if w_source is None: + w_source = space.wrap('') + if w_encoding is None: + w_encoding = space.w_None + if w_errors is None: + w_errors = space.w_None + + # Unicode argument + if not space.is_w(w_encoding, space.w_None): + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object + ) + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + + # if w_source is an integer this correctly raises a TypeError + # the CPython error message is: "encoding or errors without a string argument" + # ours is: "expected unicode, got int object" + w_source = encode_object(space, w_source, encoding, errors) + + # Is it an int? + try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("bytearray negative count")) + self.data = ['\0'] * count + return + + data = makebytearraydata_w(space, w_source) + self.data = data + + def descr_repr(self, space): + s = self.data + + # Good default if there are no replacements. 
+ buf = StringBuilder(len("bytearray(b'')") + len(s)) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + + def descr_str(self, space): + return space.wrap(''.join(self.data)) + + def descr_buffer(self, space): + return BytearrayBuffer(self.data) + + def descr_inplace_add(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += self._op_val(space, w_other) + return self + + def descr_inplace_mul(self, space, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + self.data *= times + return self + + def descr_setitem(self, space, w_index, w_other): + if isinstance(w_index, W_SliceObject): + oldsize = len(self.data) + start, stop, step, slicelength = w_index.indices4(space, oldsize) + sequence2 = makebytearraydata_w(space, w_other) + _setitem_slice_helper(space, self.data, start, step, + slicelength, sequence2, empty_elem='\x00') + else: + idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + try: + self.data[idx] = getbytevalue(space, w_other) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + + def descr_delitem(self, space, w_idx): + if isinstance(w_idx, W_SliceObject): + start, stop, step, slicelength = w_idx.indices4(space, + len(self.data)) + _delitem_slice_helper(space, self.data, start, step, slicelength) + else: + idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + try: + del self.data[idx] + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray deletion index out of range")) + + def descr_append(self, space, w_item): + self.data.append(getbytevalue(space, w_item)) + + def descr_extend(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += makebytearraydata_w(space, w_other) + return self + + def descr_insert(self, space, w_idx, w_other): + where = space.int_w(w_idx) + length = len(self.data) + index = get_positive_index(where, length) + val = getbytevalue(space, w_other) + self.data.insert(index, val) + return space.w_None + + @unwrap_spec(w_idx=WrappedDefault(-1)) + def descr_pop(self, space, w_idx): + index = space.int_w(w_idx) + try: + result = self.data.pop(index) + except IndexError: + if not self.data: + raise OperationError(space.w_IndexError, space.wrap( + "pop from empty bytearray")) + raise OperationError(space.w_IndexError, space.wrap( + "pop index out of range")) + return space.wrap(ord(result)) + + def descr_remove(self, space, w_char): + char = space.int_w(space.index(w_char)) + try: + result = self.data.remove(chr(char)) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "value not found in bytearray")) + + def descr_reverse(self, space): + self.data.reverse() W_BytearrayObject.EMPTY = W_BytearrayObject([]) @@ -200,110 +424,82 @@ __reduce__ = interp2app(descr_bytearray__reduce__), fromhex = interp2app(descr_fromhex, as_classmethod=True), -# 
__repr__ = interp2app(W_BytearrayObject.descr_repr), -# __str__ = interp2app(W_BytearrayObject.descr_str), + __repr__ = interp2app(W_BytearrayObject.descr_repr), + __str__ = interp2app(W_BytearrayObject.descr_str), -# __eq__ = interp2app(W_BytearrayObject.descr_eq), -# __ne__ = interp2app(W_BytearrayObject.descr_ne), -# __lt__ = interp2app(W_BytearrayObject.descr_lt), -# __le__ = interp2app(W_BytearrayObject.descr_le), -# __gt__ = interp2app(W_BytearrayObject.descr_gt), -# __ge__ = interp2app(W_BytearrayObject.descr_ge), + __eq__ = interp2app(W_BytearrayObject.descr_eq), + __ne__ = interp2app(W_BytearrayObject.descr_ne), + __lt__ = interp2app(W_BytearrayObject.descr_lt), + __le__ = interp2app(W_BytearrayObject.descr_le), + __gt__ = interp2app(W_BytearrayObject.descr_gt), + __ge__ = interp2app(W_BytearrayObject.descr_ge), -# __len__ = interp2app(W_BytearrayObject.descr_len), -# __iter__ = interp2app(W_BytearrayObject.descr_iter), -# __contains__ = interp2app(W_BytearrayObject.descr_contains), + __len__ = interp2app(W_BytearrayObject.descr_len), + __contains__ = interp2app(W_BytearrayObject.descr_contains), -# __add__ = interp2app(W_BytearrayObject.descr_add), + __add__ = interp2app(W_BytearrayObject.descr_add), __mul__ = interp2app(W_BytearrayObject.descr_mul), __rmul__ = interp2app(W_BytearrayObject.descr_mul), -# __getitem__ = interp2app(W_BytearrayObject.descr_getitem), + __getitem__ = interp2app(W_BytearrayObject.descr_getitem), -# capitalize = interp2app(W_BytearrayObject.descr_capitalize), -# center = interp2app(W_BytearrayObject.descr_center), -# count = interp2app(W_BytearrayObject.descr_count), -# decode = interp2app(W_BytearrayObject.descr_decode), -# expandtabs = interp2app(W_BytearrayObject.descr_expandtabs), -# find = interp2app(W_BytearrayObject.descr_find), -# rfind = interp2app(W_BytearrayObject.descr_rfind), -# index = interp2app(W_BytearrayObject.descr_index), -# rindex = interp2app(W_BytearrayObject.descr_rindex), -# isalnum = interp2app(W_BytearrayObject.descr_isalnum), -# isalpha = interp2app(W_BytearrayObject.descr_isalpha), -# isdigit = interp2app(W_BytearrayObject.descr_isdigit), -# islower = interp2app(W_BytearrayObject.descr_islower), -# isspace = interp2app(W_BytearrayObject.descr_isspace), -# istitle = interp2app(W_BytearrayObject.descr_istitle), -# isupper = interp2app(W_BytearrayObject.descr_isupper), -# join = interp2app(W_BytearrayObject.descr_join), -# ljust = interp2app(W_BytearrayObject.descr_ljust), -# rjust = interp2app(W_BytearrayObject.descr_rjust), -# lower = interp2app(W_BytearrayObject.descr_lower), -# partition = interp2app(W_BytearrayObject.descr_partition), -# rpartition = interp2app(W_BytearrayObject.descr_rpartition), -# replace = interp2app(W_BytearrayObject.descr_replace), -# split = interp2app(W_BytearrayObject.descr_split), -# rsplit = interp2app(W_BytearrayObject.descr_rsplit), -# splitlines = interp2app(W_BytearrayObject.descr_splitlines), -# startswith = interp2app(W_BytearrayObject.descr_startswith), -# endswith = interp2app(W_BytearrayObject.descr_endswith), -# strip = interp2app(W_BytearrayObject.descr_strip), -# lstrip = interp2app(W_BytearrayObject.descr_lstrip), -# rstrip = interp2app(W_BytearrayObject.descr_rstrip), -# swapcase = interp2app(W_BytearrayObject.descr_swapcase), -# title = interp2app(W_BytearrayObject.descr_title), -# translate = interp2app(W_BytearrayObject.descr_translate), -# upper = interp2app(W_BytearrayObject.descr_upper), -# zfill = interp2app(W_BytearrayObject.descr_zfill), + capitalize = 
interp2app(W_BytearrayObject.descr_capitalize), + center = interp2app(W_BytearrayObject.descr_center), + count = interp2app(W_BytearrayObject.descr_count), + decode = interp2app(W_BytearrayObject.descr_decode), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs), + find = interp2app(W_BytearrayObject.descr_find), + rfind = interp2app(W_BytearrayObject.descr_rfind), + index = interp2app(W_BytearrayObject.descr_index), + rindex = interp2app(W_BytearrayObject.descr_rindex), + isalnum = interp2app(W_BytearrayObject.descr_isalnum), + isalpha = interp2app(W_BytearrayObject.descr_isalpha), + isdigit = interp2app(W_BytearrayObject.descr_isdigit), + islower = interp2app(W_BytearrayObject.descr_islower), + isspace = interp2app(W_BytearrayObject.descr_isspace), + istitle = interp2app(W_BytearrayObject.descr_istitle), + isupper = interp2app(W_BytearrayObject.descr_isupper), + join = interp2app(W_BytearrayObject.descr_join), + ljust = interp2app(W_BytearrayObject.descr_ljust), + rjust = interp2app(W_BytearrayObject.descr_rjust), + lower = interp2app(W_BytearrayObject.descr_lower), + partition = interp2app(W_BytearrayObject.descr_partition), + rpartition = interp2app(W_BytearrayObject.descr_rpartition), + replace = interp2app(W_BytearrayObject.descr_replace), + split = interp2app(W_BytearrayObject.descr_split), + rsplit = interp2app(W_BytearrayObject.descr_rsplit), + splitlines = interp2app(W_BytearrayObject.descr_splitlines), + startswith = interp2app(W_BytearrayObject.descr_startswith), + endswith = interp2app(W_BytearrayObject.descr_endswith), + strip = interp2app(W_BytearrayObject.descr_strip), + lstrip = interp2app(W_BytearrayObject.descr_lstrip), + rstrip = interp2app(W_BytearrayObject.descr_rstrip), + swapcase = interp2app(W_BytearrayObject.descr_swapcase), + title = interp2app(W_BytearrayObject.descr_title), + translate = interp2app(W_BytearrayObject.descr_translate), + upper = interp2app(W_BytearrayObject.descr_upper), + zfill = interp2app(W_BytearrayObject.descr_zfill), + + __init__ = interp2app(W_BytearrayObject.descr_init), + __buffer__ = interp2app(W_BytearrayObject.descr_buffer), + + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem), + + append = interp2app(W_BytearrayObject.descr_append), + extend = interp2app(W_BytearrayObject.descr_extend), + insert = interp2app(W_BytearrayObject.descr_insert), + pop = interp2app(W_BytearrayObject.descr_pop), + remove = interp2app(W_BytearrayObject.descr_remove), + reverse = interp2app(W_BytearrayObject.descr_reverse), ) -bytearray_typedef.registermethods(globals()) - registerimplementation(W_BytearrayObject) init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] -def init__Bytearray(space, w_bytearray, __args__): - # this is on the silly side - w_source, w_encoding, w_errors = __args__.parse_obj( - None, 'bytearray', init_signature, init_defaults) - - if w_source is None: - w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None - - # Unicode argument - if not space.is_w(w_encoding, space.w_None): - from pypy.objspace.std.unicodeobject import ( - _get_encoding_and_errors, encode_object - ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - - # if w_source is an integer this correctly raises a TypeError - 
# the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" - w_source = encode_object(space, w_source, encoding, errors) - - # Is it an int? - try: - count = space.int_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) - w_bytearray.data = ['\0'] * count - return - - data = makebytearraydata_w(space, w_source) - w_bytearray.data = data - def len__Bytearray(space, w_bytearray): result = len(w_bytearray.data) return wrapint(space, result) @@ -379,16 +575,6 @@ data1 = [c for c in space.str_w(w_str)] return W_BytearrayObject(data1 + data2) -def inplace_mul__Bytearray_ANY(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - w_bytearray.data *= times - return w_bytearray - def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): data1 = w_bytearray1.data data2 = w_bytearray2.data @@ -444,43 +630,6 @@ # No more items to compare -- compare sizes return space.newbool(len(data1) > len(data2)) -# Mostly copied from repr__String, but without the "smart quote" -# functionality. -def repr__Bytearray(space, w_bytearray): - s = w_bytearray.data - - # Good default if there are no replacements. - buf = StringBuilder(len("bytearray(b'')") + len(s)) - - buf.append("bytearray(b'") - - for i in range(len(s)): - c = s[i] - - if c == '\\' or c == "'": - buf.append('\\') - buf.append(c) - elif c == '\t': - buf.append('\\t') - elif c == '\r': - buf.append('\\r') - elif c == '\n': - buf.append('\\n') - elif not '\x20' <= c < '\x7f': - n = ord(c) - buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) - else: - buf.append(c) - - buf.append("')") - - return space.wrap(buf.build()) - -def str__Bytearray(space, w_bytearray): - return space.wrap(''.join(w_bytearray.data)) - def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): w_char = space.wrap(space.bufferstr_new_w(w_char)) w_str = str__Bytearray(space, w_bytearray) @@ -586,38 +735,6 @@ w_str = str__Bytearray(space, w_bytearray) return bytesobject.str_isspace__String(space, w_str) -def bytearray_insert__Bytearray_Int_ANY(space, w_bytearray, w_idx, w_other): - where = space.int_w(w_idx) - length = len(w_bytearray.data) - index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - w_bytearray.data.insert(index, val) - return space.w_None - -def bytearray_pop__Bytearray_Int(space, w_bytearray, w_idx): - index = space.int_w(w_idx) - try: - result = w_bytearray.data.pop(index) - except IndexError: - if not w_bytearray.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) - return space.wrap(ord(result)) - -def bytearray_remove__Bytearray_ANY(space, w_bytearray, w_char): - char = space.int_w(space.index(w_char)) - try: - result = w_bytearray.data.remove(chr(char)) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) - -def bytearray_reverse__Bytearray(space, w_bytearray): - w_bytearray.data.reverse() - return space.w_None - _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) def bytearray_strip__Bytearray_None(space, 
w_bytearray, w_chars): @@ -638,54 +755,6 @@ def bytearray_rstrip__Bytearray_ANY(space, w_bytearray, w_chars): return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 0, 1) -# __________________________________________________________ -# Mutability methods - -def bytearray_append__Bytearray_ANY(space, w_bytearray, w_item): - w_bytearray.data.append(getbytevalue(space, w_item)) - -def bytearray_extend__Bytearray_Bytearray(space, w_bytearray, w_other): - w_bytearray.data += w_other.data - -def bytearray_extend__Bytearray_ANY(space, w_bytearray, w_other): - w_bytearray.data += makebytearraydata_w(space, w_other) - -def inplace_add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - bytearray_extend__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2) - return w_bytearray1 - -def inplace_add__Bytearray_ANY(space, w_bytearray1, w_iterable2): - w_bytearray1.data += space.bufferstr_new_w(w_iterable2) - return w_bytearray1 - -def setitem__Bytearray_ANY_ANY(space, w_bytearray, w_index, w_item): - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") - try: - w_bytearray.data[idx] = getbytevalue(space, w_item) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def setitem__Bytearray_Slice_ANY(space, w_bytearray, w_slice, w_other): - oldsize = len(w_bytearray.data) - start, stop, step, slicelength = w_slice.indices4(space, oldsize) - sequence2 = makebytearraydata_w(space, w_other) - _setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') - -def delitem__Bytearray_ANY(space, w_bytearray, w_idx): - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") - try: - del w_bytearray.data[idx] - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) - return space.w_None - -def delitem__Bytearray_Slice(space, w_bytearray, w_slice): - start, stop, step, slicelength = w_slice.indices4(space, - len(w_bytearray.data)) - _delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) - #XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength==0: @@ -802,5 +871,3 @@ def buffer__Bytearray(space, self): b = BytearrayBuffer(self.data) return space.wrap(b) - -#register_all(vars(), globals()) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -177,8 +177,12 @@ def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): self_as_unicode = decode_object(space, self, None, None) - #return self_as_unicode.descr_add(space, w_other) return space.add(self_as_unicode, w_other) + elif space.isinstance_w(w_other, space.w_bytearray): + # XXX: eliminate double-copy + from .bytearrayobject import W_BytearrayObject + self_as_bytearray = W_BytearrayObject(list(self._value)) + return space.add(self_as_bytearray, w_other) return StringMethods.descr_add(self, space, w_other) def _startswith(self, space, value, w_prefix, start, end): @@ -266,7 +270,6 @@ __ge__ = interp2app(W_BytesObject.descr_ge), __len__ = interp2app(W_BytesObject.descr_len), - #__iter__ = interp2app(W_BytesObject.descr_iter), __contains__ = interp2app(W_BytesObject.descr_contains), __add__ = interp2app(W_BytesObject.descr_add), diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- 
a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -104,6 +104,17 @@ # pass def descr_contains(self, space, w_sub): + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if (isinstance(self, W_BytearrayObject) and + space.isinstance_w(w_sub, space.w_int)): + char = space.int_w(w_sub) + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in self.data: + if ord(c) == char: + return space.w_True + return space.w_False return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): @@ -144,6 +155,9 @@ if index < 0 or index >= selflen: raise OperationError(space.w_IndexError, space.wrap("string index out of range")) + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if isinstance(self, W_BytearrayObject): + return space.wrap(ord(selfvalue[index])) #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) @@ -165,7 +179,7 @@ builder.append(self._upper(value[0])) for i in range(1, len(value)): builder.append(self._lower(value[i])) - return space.wrap(builder.build()) + return self._new(builder.build()) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_center(self, space, width, w_fillchar): @@ -419,7 +433,7 @@ fillchar = fillchar[0] # annotator hint: it's a single character value += d * fillchar - return space.wrap(value) + return self._new(value) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_rjust(self, space, width, w_fillchar): @@ -434,7 +448,7 @@ fillchar = fillchar[0] # annotator hint: it's a single character value = d * fillchar + value - return space.wrap(value) + return self._new(value) def descr_lower(self, space): value = self._val(space) @@ -453,6 +467,9 @@ if pos == -1: return space.newtuple([self, self.EMPTY, self.EMPTY]) else: + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if isinstance(self, W_BytearrayObject): + w_sub = self._new(sub) return space.newtuple( [self._sliced(space, value, 0, pos, value), w_sub, self._sliced(space, value, pos+len(sub), len(value), value)]) @@ -467,6 +484,9 @@ if pos == -1: return space.newtuple([self.EMPTY, self.EMPTY, self]) else: + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if isinstance(self, W_BytearrayObject): + w_sub = self._new(sub) return space.newtuple( [self._sliced(space, value, 0, pos, value), w_sub, self._sliced(space, value, pos+len(sub), len(value), value)]) @@ -481,7 +501,7 @@ except OverflowError: raise OperationError(space.w_OverflowError, space.wrap("replace string is too long")) - return space.wrap(res) + return self._new(res) @unwrap_spec(maxsplit=int) def descr_split(self, space, w_sep=None, maxsplit=-1): @@ -682,7 +702,7 @@ builder.append(self._upper(ch)) else: builder.append(ch) - return space.wrap(builder.build()) + return self._new(builder.build()) def descr_title(self, space): selfval = self._val(space) @@ -698,7 +718,7 @@ else: builder.append(self._lower(ch)) previous_is_cased = self._iscased(ch) - return space.wrap(builder.build()) + return self._new(builder.build()) DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)]) @@ -745,7 +765,7 @@ num_zeros = width - len(selfval) if num_zeros <= 0: # cannot return self, in case it is a subclass of str - return space.wrap(selfval) + return self._new(selfval) builder = self._builder(width) if len(selfval) > 0 and (selfval[0] == '+' or selfval[0] == '-'): @@ -756,7 +776,7 @@ start = 0 
builder.append_multiple_char(self._chr('0'), num_zeros) builder.append_slice(selfval, start, len(selfval)) - return space.wrap(builder.build()) + return self._new(builder.build()) def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -300,6 +300,12 @@ def unicode_from_encoded_object(space, w_obj, encoding, errors): + # explicitly block bytearray on 2.7 + from .bytearrayobject import W_BytearrayObject + if isinstance(w_obj, W_BytearrayObject): + raise OperationError(space.w_TypeError, + space.wrap("decoding bytearray is not supported")) + w_retval = decode_object(space, w_obj, encoding, errors) if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt(space.w_TypeError, @@ -405,7 +411,6 @@ __ge__ = interp2app(W_UnicodeObject.descr_ge), __len__ = interp2app(W_UnicodeObject.descr_len), - #__iter__ = interp2app(W_UnicodeObject.descr_iter), __contains__ = interp2app(W_UnicodeObject.descr_contains), __add__ = interp2app(W_UnicodeObject.descr_add), From noreply at buildbot.pypy.org Wed Jul 24 11:25:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 11:25:38 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation, maybe Message-ID: <20130724092538.D8EEE1C02A1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65595:c7b6692d954c Date: 2013-07-24 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/c7b6692d954c/ Log: fix translation, maybe diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -105,7 +105,10 @@ class SomeStatvfsResult(annmodel.SomeObject): - knowntype = os.statvfs_result + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used def rtyper_makerepr(self, rtyper): from rpython.rtyper.module import r_os_stat From noreply at buildbot.pypy.org Wed Jul 24 11:30:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 11:30:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add planning.txt with two tasks I have Message-ID: <20130724093018.C90931C02A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5000:fe4419528718 Date: 2013-07-24 11:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/fe4419528718/ Log: Add planning.txt with two tasks I have diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/london-2013/planning.txt @@ -0,0 +1,8 @@ +Tasks +----- + +* fix the issue with the os.xyz attributes not showing up depending on + which host we use during translation + +* cffi 1.0: think about how to separate compilation from execution of + the script (e.g. a separate foo.c file, and "python -m cffi foo.c") From noreply at buildbot.pypy.org Wed Jul 24 11:34:54 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 24 Jul 2013 11:34:54 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Update sprint task. Message-ID: <20130724093454.E56281C13EE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: extradoc Changeset: r5001:f9d62646bdbb Date: 2013-07-24 11:34 +0200 http://bitbucket.org/pypy/extradoc/changeset/f9d62646bdbb/ Log: Update sprint task. 
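A note on the translation problem tracked above: the "fix translation, maybe" changeset and the "os.xyz attributes" item in planning.txt both deal with RPython code that touches an os attribute while its module is merely being imported on the translation host, which fails on hosts whose os module lacks that attribute (os.statvfs_result does not exist on Windows, for instance). The snippet below is a minimal, self-contained sketch of the hasattr guard from the ll_os_stat.py hunk; the class name and the final print are illustrative stand-ins, not code from the repository.

    import os

    class SomeStatvfsResultSketch(object):
        # The class body executes on the *host* Python at import time,
        # i.e. during translation, so the optional attribute must be
        # guarded rather than referenced unconditionally.
        if hasattr(os, 'statvfs_result'):
            knowntype = os.statvfs_result
        else:
            knowntype = None   # placeholder; never used on such hosts

    print(SomeStatvfsResultSketch.knowntype)

The posix-module registration loop that appears a little further down in this thread applies the same idea one level up: an interp-level function is only exposed when hasattr(os, name) is true on the host.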
diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -2,7 +2,8 @@ ----- * fix the issue with the os.xyz attributes not showing up depending on - which host we use during translation + which host we use during translation. + Maybe revive branch 'missing-os-functions'. * cffi 1.0: think about how to separate compilation from execution of the script (e.g. a separate foo.c file, and "python -m cffi foo.c") From noreply at buildbot.pypy.org Wed Jul 24 11:44:19 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 11:44:19 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to finally fix translation. Message-ID: <20130724094419.0BCB01C14B6@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r65596:7c7aa74c7b6f Date: 2013-07-24 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7c7aa74c7b6f/ Log: Try to finally fix translation. diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -70,9 +70,6 @@ 'lstat': 'interp_posix.lstat', 'stat_float_times': 'interp_posix.stat_float_times', - 'fstatvfs': 'interp_posix.fstatvfs', - 'statvfs': 'interp_posix.statvfs', - 'dup': 'interp_posix.dup', 'dup2': 'interp_posix.dup2', 'access': 'interp_posix.access', @@ -176,7 +173,7 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: From noreply at buildbot.pypy.org Wed Jul 24 12:01:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 12:01:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Add comments Message-ID: <20130724100157.B40C01C15A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65597:b924d55fe87a Date: 2013-07-24 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/b924d55fe87a/ Log: Add comments diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -735,6 +735,10 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] @@ -752,6 +756,9 @@ if rx86.fits_in_32bits(rst): self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
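# --- illustrative aside, not part of the diff above or below ---------------
# A plain-Python sketch of what the new comments in assembler.py describe:
# the absolute address of 'rootstacktop' can be used directly as a memory
# operand only if it fits in 32 bits; otherwise it is first materialized in
# the scratch register r11, and that register must stay untouched until the
# shadowstack top is written back.  fits_in_32bits() here is a simplified
# stand-in for rx86.fits_in_32bits, and the returned strings only describe
# the intended instruction sequence.
def fits_in_32bits(value):
    return -2**31 <= value <= 2**31 - 1

def shadowstack_access_plan(rst):
    if fits_in_32bits(rst):
        return ["MOV ebx, [rootstacktop]",    # 32-bit absolute load of the top
                "MOV [rootstacktop], ebx"]    # ...and the matching store
    return ["MOV r11, <64-bit address>",      # address routed through r11 first
            "MOV ebx, [r11]",
            "MOV [r11], ebx"]                 # r11 is reused for the store
# ----------------------------------------------------------------------------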
self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx From noreply at buildbot.pypy.org Wed Jul 24 12:08:08 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 24 Jul 2013 12:08:08 +0200 (CEST) Subject: [pypy-commit] pypy missing-os-functions: hg merge default again Message-ID: <20130724100808.2FED01C15A9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: missing-os-functions Changeset: r65599:d8ba553ece2d Date: 2013-07-24 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/d8ba553ece2d/ Log: hg merge default again diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -68,9 +68,6 @@ 'lstat': 'interp_posix.lstat', 'stat_float_times': 'interp_posix.stat_float_times', - 'fstatvfs': 'interp_posix.fstatvfs', - 'statvfs': 'interp_posix.statvfs', - 'dup': 'interp_posix.dup', 'dup2': 'interp_posix.dup2', 'access': 'interp_posix.access', @@ -106,6 +103,7 @@ ttyname chmod fchmod chown lchown fchown chroot link symlink readlink ftruncate getloadavg nice uname execv execve fork spawnv spawnve putenv unsetenv fchdir fsync fdatasync mknod + fstatvfs statvfs openpty forkpty mkfifo getlogin sysconf fpathconf getsid getuid geteuid getgid getegid getpgrp getpgid setsid setuid seteuid setgid setegid setpgrp setpgid diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -735,6 +735,10 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] @@ -752,6 +756,9 @@ if rx86.fits_in_32bits(rst): self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -84,7 +84,8 @@ else: separate_module_sources = [] export_symbols = [] - includes=['errno.h', 'stdio.h', 'stdlib.h', 'unistd.h', 'sys/stat.h', + includes=['errno.h', 'stdio.h', 'stdlib.h', 'unistd.h', + 'sys/stat.h', 'sys/statvfs.h', 'fcntl.h', 'signal.h', 'pty.h', 'sys/utsname.h', 'sys/wait.h', 'sysexits.h', 'limits.h'] rposix_eci = ExternalCompilationInfo( @@ -102,6 +103,7 @@ ttyname chmod fchmod chown lchown fchown chroot link symlink readlink ftruncate getloadavg nice uname execv execve fork spawnv spawnve putenv unsetenv fchdir fsync fdatasync mknod + fstatvfs statvfs openpty forkpty mkfifo getlogin sysconf fpathconf getsid getuid geteuid getgid getegid getpgrp getpgid setsid setuid seteuid setgid setegid setpgrp setpgid From noreply at buildbot.pypy.org Wed Jul 24 13:31:12 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 24 Jul 2013 13:31:12 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fix link Message-ID: <20130724113112.CF6E51C23F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5002:24757a086b2b Date: 2013-07-19 16:11 +0200 http://bitbucket.org/pypy/extradoc/changeset/24757a086b2b/ Log: fix link diff --git a/sprintinfo/london-2013/announcement.txt b/sprintinfo/london-2013/announcement.txt --- a/sprintinfo/london-2013/announcement.txt +++ b/sprintinfo/london-2013/announcement.txt @@ -47,7 +47,7 @@ get there`_. We are being hosted by `Laurence Tratt`_ and the `Software Development Team`_. -.. _`King's College`: http://www.kcl.ac.uk/ +.. _`King's College's`: http://www.kcl.ac.uk/ .. _`Central London, UK`: http://goo.gl/maps/Qz0zz .. _`Strand Campus`: http://www.kcl.ac.uk/campuslife/campuses/strand/StrandCampusLocation.aspx .. _`how to get there`: http://www.kcl.ac.uk/campuslife/campuses/directions/strand.aspx From noreply at buildbot.pypy.org Wed Jul 24 13:31:14 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 24 Jul 2013 13:31:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some people known to come Message-ID: <20130724113114.16D9F1C23F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5003:de5c4d640741 Date: 2013-07-19 16:12 +0200 http://bitbucket.org/pypy/extradoc/changeset/de5c4d640741/ Log: some people known to come diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -10,8 +10,11 @@ ==================== ============== ======================= Name Arrive/Depart Accomodation ==================== ============== ======================= -Carl Friedrich Bolz ? ? +Carl Friedrich Bolz ? Lukas +Lukas Diekmann lives there Romain Guillebert ? ? +Laurence Tratt lives there +Edd Barrett ? ? ==================== ============== ======================= @@ -53,5 +56,5 @@ Guido Wesdorp ? ? Leonardo Santagada ? ? Alexandre Fayolle ? ? -Sylvain Th�nault ? ? +Sylvain Thnault ? ? 
==================== ============== ===================== From noreply at buildbot.pypy.org Wed Jul 24 13:31:15 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 24 Jul 2013 13:31:15 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130724113115.525961C23F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5004:02849eb0a42a Date: 2013-07-24 11:30 +0200 http://bitbucket.org/pypy/extradoc/changeset/02849eb0a42a/ Log: merge diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/london-2013/planning.txt @@ -0,0 +1,8 @@ +Tasks +----- + +* fix the issue with the os.xyz attributes not showing up depending on + which host we use during translation + +* cffi 1.0: think about how to separate compilation from execution of + the script (e.g. a separate foo.c file, and "python -m cffi foo.c") From noreply at buildbot.pypy.org Wed Jul 24 13:31:18 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 24 Jul 2013 13:31:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20130724113118.547641C23F3@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5005:c6a375bcba0f Date: 2013-07-24 13:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/c6a375bcba0f/ Log: merge diff --git a/sprintinfo/london-2013/planning.txt b/sprintinfo/london-2013/planning.txt --- a/sprintinfo/london-2013/planning.txt +++ b/sprintinfo/london-2013/planning.txt @@ -2,7 +2,8 @@ ----- * fix the issue with the os.xyz attributes not showing up depending on - which host we use during translation + which host we use during translation. + Maybe revive branch 'missing-os-functions'. * cffi 1.0: think about how to separate compilation from execution of the script (e.g. 
a separate foo.c file, and "python -m cffi foo.c") From noreply at buildbot.pypy.org Wed Jul 24 14:41:58 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 24 Jul 2013 14:41:58 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: kill ObjectOrientedTypeSystem Message-ID: <20130724124158.855511C14BB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65600:a442193ace92 Date: 2013-07-24 14:28 +0200 http://bitbucket.org/pypy/pypy/changeset/a442193ace92/ Log: kill ObjectOrientedTypeSystem diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -3,7 +3,6 @@ from rpython.tool.pairtype import extendabletype -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.error import TyperError @@ -142,48 +141,8 @@ v_list = hop.inputargs(robj1, robj2) return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) -class ObjectOrientedTypeSystem(TypeSystem): - name = "ootypesystem" - callable_trait = (ootype.StaticMethod, ootype.static_meth) - - def derefType(self, T): - assert isinstance(T, ootype.OOType) - return T - - def deref(self, obj): - assert isinstance(ootype.typeOf(obj), ootype.OOType) - return obj - - def check_null(self, repr, hop): - vlist = hop.inputargs(repr) - return hop.genop('oononnull', vlist, resulttype=ootype.Bool) - - def getconcretetype(self, v): - return v.concretetype - - def null_callable(self, T): - return ootype.null(T) - - def generic_is(self, robj1, robj2, hop): - roriginal1 = robj1 - roriginal2 = robj2 - if robj1.lowleveltype is lltype.Void: - robj1 = robj2 - elif robj2.lowleveltype is lltype.Void: - robj2 = robj1 - if (not isinstance(robj1.lowleveltype, (ootype.Instance, ootype.BuiltinADTType)) or - not isinstance(robj2.lowleveltype, (ootype.Instance, ootype.BuiltinADTType))) and \ - (robj1.lowleveltype is not ootype.Class or - robj2.lowleveltype is not ootype.Class): - raise TyperError('is of instances of the non-instances: %r, %r' % ( - roriginal1, roriginal2)) - - v_list = hop.inputargs(robj1, robj2) - return hop.genop('oois', v_list, resulttype=lltype.Bool) - # All typesystems are singletons LowLevelTypeSystem.instance = LowLevelTypeSystem() -ObjectOrientedTypeSystem.instance = ObjectOrientedTypeSystem() getfunctionptr = LowLevelTypeSystem.instance.getcallable From noreply at buildbot.pypy.org Wed Jul 24 14:42:00 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 24 Jul 2013 14:42:00 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: hg rm rpython/rtyper/ootypesystem/ Message-ID: <20130724124200.1BBF01C14BB@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65601:b7bd824e2c77 Date: 2013-07-24 14:32 +0200 http://bitbucket.org/pypy/pypy/changeset/b7bd824e2c77/ Log: hg rm rpython/rtyper/ootypesystem/ diff too long, truncating to 2000 out of 7609 lines diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -24,7 +24,6 @@ See description in doc/translation.txt.""" def __init__(self, translator=None, policy=None, bookkeeper=None): - import rpython.rtyper.ootypesystem.ooregistry # has side effects import rpython.rtyper.extfuncregistry # has side effects import rpython.rlib.nonconst # has side effects @@ -138,10 +137,10 @@ checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) - if len(inputcells) != nbarg: - raise TypeError("%s expects %d args, got %d" %( + 
if len(inputcells) != nbarg: + raise TypeError("%s expects %d args, got %d" %( flowgraph, nbarg, len(inputcells))) - + # register the entry point self.addpendinggraph(flowgraph, inputcells) # recursively proceed until no more pending block is left @@ -267,7 +266,7 @@ pos = '?' if pos != '?': pos = self.whereami(pos) - + log.WARNING("%s/ %s" % (pos, msg)) @@ -297,7 +296,7 @@ v = graph.getreturnvar() try: return self.bindings[v] - except KeyError: + except KeyError: # the function didn't reach any return statement so far. # (some functions actually never do, they always raise exceptions) return annmodel.s_ImpossibleValue diff --git a/rpython/rtyper/ootypesystem/__init__.py b/rpython/rtyper/ootypesystem/__init__.py deleted file mode 100644 diff --git a/rpython/rtyper/ootypesystem/exceptiondata.py b/rpython/rtyper/ootypesystem/exceptiondata.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/exceptiondata.py +++ /dev/null @@ -1,41 +0,0 @@ -from rpython.rtyper.exceptiondata import AbstractExceptionData -from rpython.rtyper.ootypesystem import rclass -from rpython.rtyper.ootypesystem import ootype -from rpython.annotator import model as annmodel -from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES - -class ExceptionData(AbstractExceptionData): - """Public information for the code generators to help with exceptions.""" - - def __init__(self, rtyper): - AbstractExceptionData.__init__(self, rtyper) - self._compute_exception_instance(rtyper) - - def _compute_exception_instance(self, rtyper): - excdef = rtyper.annotator.bookkeeper.getuniqueclassdef(Exception) - excrepr = rclass.getinstancerepr(rtyper, excdef) - self._EXCEPTION_INST = excrepr.lowleveltype - - def is_exception_instance(self, INSTANCE): - return ootype.isSubclass(INSTANCE, self._EXCEPTION_INST) - - def make_helpers(self, rtyper): - self.fn_exception_match = self.make_exception_matcher(rtyper) - self.fn_type_of_exc_inst = self.make_type_of_exc_inst(rtyper) - self.fn_raise_OSError = self.make_raise_OSError(rtyper) - - def make_exception_matcher(self, rtyper): - # ll_exception_matcher(real_exception_class, match_exception_class) - s_classtype = annmodel.SomeOOClass(ootype.ROOT) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_issubclass, [s_classtype, s_classtype]) - return helper_fn - - - def make_type_of_exc_inst(self, rtyper): - # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomeOOInstance(self.lltype_of_exception_value) - helper_fn = rtyper.annotate_helper_fn(rclass.ll_inst_type, [s_excinst]) - return helper_fn - - def cast_exception(self, TYPE, value): - return ootype.ooupcast(TYPE, value) diff --git a/rpython/rtyper/ootypesystem/ll_str.py b/rpython/rtyper/ootypesystem/ll_str.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/ll_str.py +++ /dev/null @@ -1,49 +0,0 @@ -import sys -from rpython.rtyper.ootypesystem.ootype import new, oostring, StringBuilder -from rpython.rtyper.ootypesystem.ootype import make_string - -def ll_int_str(repr, i): - return ll_int2dec(i) - -def ll_int2dec(i): - return oostring(i, 10) - -SPECIAL_VALUE = -sys.maxint-1 -SPECIAL_VALUE_HEX = make_string( - '-' + hex(sys.maxint+1).replace('L', '').replace('l', '')) -SPECIAL_VALUE_OCT = make_string( - '-' + oct(sys.maxint+1).replace('L', '').replace('l', '')) - -def ll_int2hex(i, addPrefix): - if not addPrefix: - return oostring(i, 16) - - buf = new(StringBuilder) - if i<0: - if i == SPECIAL_VALUE: - return SPECIAL_VALUE_HEX - i = -i - buf.ll_append_char('-') - - buf.ll_append_char('0') - 
buf.ll_append_char('x') - buf.ll_append(oostring(i, 16)) - return buf.ll_build() - -def ll_int2oct(i, addPrefix): - if not addPrefix or i==0: - return oostring(i, 8) - - buf = new(StringBuilder) - if i<0: - if i == SPECIAL_VALUE: - return SPECIAL_VALUE_OCT - i = -i - buf.ll_append_char('-') - - buf.ll_append_char('0') - buf.ll_append(oostring(i, 8)) - return buf.ll_build() - -def ll_float_str(repr, f): - return oostring(f, -1) diff --git a/rpython/rtyper/ootypesystem/module/__init__.py b/rpython/rtyper/ootypesystem/module/__init__.py deleted file mode 100644 diff --git a/rpython/rtyper/ootypesystem/module/ll_math.py b/rpython/rtyper/ootypesystem/module/ll_math.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/module/ll_math.py +++ /dev/null @@ -1,27 +0,0 @@ -import math -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.ootypesystem.rtupletype import TUPLE_TYPE - -FREXP_RESULT = TUPLE_TYPE([ootype.Float, ootype.Signed]) -MODF_RESULT = TUPLE_TYPE([ootype.Float, ootype.Float]) - -def ll_frexp_result(mantissa, exponent): - tup = ootype.new(FREXP_RESULT) - tup.item0 = mantissa - tup.item1 = exponent - return tup - -def ll_modf_result(fracpart, intpart): - tup = ootype.new(MODF_RESULT) - tup.item0 = fracpart - tup.item1 = intpart - return tup - -def ll_math_frexp(x): - mantissa, exponent = math.frexp(x) - return ll_frexp_result(mantissa, exponent) - -def ll_math_modf(x): - fracpart, intpart = math.modf(x) - return ll_modf_result(fracpart, intpart) - diff --git a/rpython/rtyper/ootypesystem/module/ll_os.py b/rpython/rtyper/ootypesystem/module/ll_os.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/module/ll_os.py +++ /dev/null @@ -1,7 +0,0 @@ -# mostly-deprecated module - -from rpython.rtyper.ootypesystem import ootype -from rpython.rtyper.ootypesystem.rtupletype import TUPLE_TYPE -from rpython.rtyper.module.ll_os_stat import PORTABLE_STAT_FIELDS - -STAT_RESULT = TUPLE_TYPE([_TYPE for _name, _TYPE in PORTABLE_STAT_FIELDS]) diff --git a/rpython/rtyper/ootypesystem/module/ll_os_path.py b/rpython/rtyper/ootypesystem/module/ll_os_path.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/module/ll_os_path.py +++ /dev/null @@ -1,5 +0,0 @@ -from rpython.rtyper.module.support import OOSupport -from rpython.rtyper.module.ll_os_path import BaseOsPath - -class Implementation(BaseOsPath, OOSupport): - pass diff --git a/rpython/rtyper/ootypesystem/ooopimpl.py b/rpython/rtyper/ootypesystem/ooopimpl.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/ooopimpl.py +++ /dev/null @@ -1,73 +0,0 @@ -from rpython.rtyper.ootypesystem import ootype - -# ____________________________________________________________ -# Implementation of the 'canfold' oo operations - -def op_ooupcast(INST, inst): - return ootype.ooupcast(INST, inst) -op_ooupcast.need_result_type = True - -def op_oodowncast(INST, inst): - return ootype.oodowncast(INST, inst) -op_oodowncast.need_result_type = True - -def op_cast_to_object(inst): - return ootype.cast_to_object(inst) - -def op_cast_from_object(TYPE, obj): - return ootype.cast_from_object(TYPE, obj) -op_cast_from_object.need_result_type = True - -def op_oononnull(inst): - checkinst(inst) - return bool(inst) - -def op_ooisnull(inst): - return not op_oononnull(inst) - -def op_oois(obj1, obj2): - if is_inst(obj1): - checkinst(obj2) - return obj1 == obj2 # NB. 
differently-typed NULLs must be equal - elif isinstance(obj1, ootype._class): - assert isinstance(obj2, ootype._class) - return obj1 is obj2 - elif isinstance(obj1, ootype._object): - assert isinstance(obj2, ootype._object) - return obj1 == obj2 - else: - assert False, "oois on something silly" - -def op_ooisnot(obj1, obj2): - return not op_oois(obj1, obj2) - -def op_instanceof(inst, INST): - return ootype.instanceof(inst, INST) - -def op_classof(inst): - return ootype.classof(inst) - -def op_subclassof(class1, class2): - return ootype.subclassof(class1, class2) - -def op_oogetfield(inst, name): - checkinst(inst) - if not ootype.typeOf(inst)._hints.get('immutable'): - raise TypeError("cannot fold oogetfield on mutable instance") - return getattr(inst, name) - -def is_inst(inst): - T = ootype.typeOf(inst) - return T is ootype.Object or T is ootype.Class or\ - isinstance(T, (ootype.Instance, - ootype.BuiltinType, - ootype.StaticMethod,)) - -def checkinst(inst): - assert is_inst(inst) - -# ____________________________________________________________ - -def get_op_impl(opname): - # get the op_xxx() function from the globals above - return globals()['op_' + opname] diff --git a/rpython/rtyper/ootypesystem/ooregistry.py b/rpython/rtyper/ootypesystem/ooregistry.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/ooregistry.py +++ /dev/null @@ -1,85 +0,0 @@ -from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.annotator import model as annmodel -from rpython.rtyper.ootypesystem import ootype - -class Entry_oostring(ExtRegistryEntry): - _about_ = ootype.oostring - - def compute_result_annotation(self, obj_s, base_s): - assert isinstance(obj_s, (annmodel.SomeInteger, - annmodel.SomeChar, - annmodel.SomeFloat, - annmodel.SomeOOInstance, - annmodel.SomeString)) - assert isinstance(base_s, annmodel.SomeInteger) - return annmodel.SomeOOInstance(ootype.String) - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0],(annmodel.SomeInteger, - annmodel.SomeChar, - annmodel.SomeString, - annmodel.SomeFloat, - annmodel.SomeOOInstance, - annmodel.SomeString)) - vlist = hop.inputargs(hop.args_r[0], ootype.Signed) - hop.exception_cannot_occur() - return hop.genop('oostring', vlist, resulttype = ootype.String) - -class Entry_oounicode(ExtRegistryEntry): - _about_ = ootype.oounicode - - def compute_result_annotation(self, obj_s, base_s): - assert isinstance(obj_s, annmodel.SomeUnicodeCodePoint) or \ - (isinstance(obj_s, annmodel.SomeOOInstance) - and obj_s.ootype in (ootype.String, ootype.Unicode)) - assert isinstance(base_s, annmodel.SomeInteger) - return annmodel.SomeOOInstance(ootype.Unicode) - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], (annmodel.SomeUnicodeCodePoint, - annmodel.SomeOOInstance)) - vlist = hop.inputargs(hop.args_r[0], ootype.Signed) - hop.exception_cannot_occur() - return hop.genop('oounicode', vlist, resulttype = ootype.Unicode) - - -class Entry_ootype_string(ExtRegistryEntry): - _type_ = ootype._string - - def compute_annotation(self): - return annmodel.SomeOOInstance(ootype=ootype.typeOf(self.instance)) - - -class Entry_ooparse_int(ExtRegistryEntry): - _about_ = ootype.ooparse_int - - def compute_result_annotation(self, str_s, base_s): - assert isinstance(str_s, annmodel.SomeOOInstance)\ - and str_s.ootype is ootype.String - assert isinstance(base_s, annmodel.SomeInteger) - return annmodel.SomeInteger() - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], annmodel.SomeOOInstance)\ - and hop.args_s[0].ootype is 
ootype.String - vlist = hop.inputargs(hop.args_r[0], ootype.Signed) - hop.has_implicit_exception(ValueError) - hop.exception_is_here() - return hop.genop('ooparse_int', vlist, resulttype = ootype.Signed) - - -class Entry_ooparse_float(ExtRegistryEntry): - _about_ = ootype.ooparse_float - - def compute_result_annotation(self, str_s): - assert isinstance(str_s, annmodel.SomeOOInstance)\ - and str_s.ootype is ootype.String - return annmodel.SomeFloat() - - def specialize_call(self, hop): - assert isinstance(hop.args_s[0], annmodel.SomeOOInstance)\ - and hop.args_s[0].ootype is ootype.String - vlist = hop.inputargs(hop.args_r[0]) - hop.has_implicit_exception(ValueError) - hop.exception_is_here() - return hop.genop('ooparse_float', vlist, resulttype = ootype.Float) diff --git a/rpython/rtyper/ootypesystem/ootype.py b/rpython/rtyper/ootypesystem/ootype.py deleted file mode 100644 --- a/rpython/rtyper/ootypesystem/ootype.py +++ /dev/null @@ -1,2049 +0,0 @@ -import py - -from rpython.rlib import objectmodel, types -from rpython.rlib.signature import signature -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.lltypesystem.lltype import (LowLevelType, Signed, Unsigned, - Float, Char, Bool, Void, UniChar, typeOf, Primitive, isCompatibleType, - enforce, saferecursive, SignedLongLong, UnsignedLongLong, frozendict, - identityhash) -from rpython.tool.uid import uid - - -STATICNESS = True - -class OOType(LowLevelType): - - oopspec_name = None - - _classes = {} - - @property - def _class(self): - try: - return self._classes[self] - except KeyError: - cls = _class(self) - self._classes[self] = cls - return cls - - - def _is_compatible(TYPE1, TYPE2): - if TYPE1 == TYPE2: - return True - if isinstance(TYPE1, Instance) and isinstance(TYPE2, Instance): - return isSubclass(TYPE1, TYPE2) - else: - return False - - def _enforce(TYPE2, value): - TYPE1 = typeOf(value) - if TYPE1 == TYPE2: - return value - if isinstance(TYPE1, Instance) and isinstance(TYPE2, Instance): - if isSubclass(TYPE1, TYPE2): - return value._enforce(TYPE2) - raise TypeError - - -class ForwardReference(OOType): - def become(self, realtype): - if not isinstance(realtype, OOType): - raise TypeError("ForwardReference can only be to an OOType, " - "not %r" % (realtype,)) - self.__class__ = realtype.__class__ - self.__dict__ = realtype.__dict__ - - def __hash__(self): - raise TypeError("%r object is not hashable" % self.__class__.__name__) - - -# warning: the name Object is rebount at the end of file -class Object(OOType): - """ - A type which everything can be casted to. 
- """ - - def _defl(self): - return self._null - - -class Class(OOType): - - def _defl(self): - return nullruntimeclass - - def _example(self): - return _class(ROOT) - -Class = Class() - -class Instance(OOType): - """this is the type of user-defined objects""" - def __init__(self, name, superclass, fields={}, methods={}, - _is_root=False, _hints = {}): - self._name = name - self._hints = frozendict(_hints) - self._subclasses = [] - - if _is_root: - self._superclass = None - else: - if superclass is not None: - self._set_superclass(superclass) - - self._methods = frozendict() - self._fields = frozendict() - self._overridden_defaults = frozendict() - self._fields_with_default = [] - - self._add_fields(fields) - self._add_methods(methods) - - self._null = make_null_instance(self) - self.__dict__['_class'] = _class(self) - - def __eq__(self, other): - return self is other - - def __ne__(self, other): - return self is not other - - def __hash__(self): - return object.__hash__(self) - - def _defl(self): - return self._null - - def _example(self): return new(self) - - def __repr__(self): - return '<%s>' % (self,) - - def __str__(self): - return '%s(%s)' % (self.__class__.__name__, self._name) - - def _set_superclass(self, superclass): - assert isinstance(superclass, Instance) - self._superclass = superclass - self._superclass._add_subclass(self) - - def _add_subclass(self, INSTANCE): - assert isinstance(INSTANCE, Instance) - self._subclasses.append(INSTANCE) - - def _all_subclasses(self): - """ - Transitive closure on self._subclasses. - - Return a set containing all direct and indirect subclasses, - including itself. - """ - res = set() - stack = [self] - while stack: - item = stack.pop() - res.add(item) - stack += item._subclasses - return res - - def _add_fields(self, fields, with_default=False): - fields = fields.copy() # mutated below - for name, defn in fields.iteritems(): - _, meth = self._lookup(name) - if meth is not None: - raise TypeError("Cannot add field %r: method already exists" % name) - - if self._superclass is not None: - if self._superclass._has_field(name): - raise TypeError("Field %r exists in superclass" % name) - - if type(defn) is not tuple: - if isinstance(defn, Meth): - raise TypeError("Attempting to store method in field") - - fields[name] = (defn, defn._defl()) - else: - ootype, default = defn - - if isinstance(ootype, Meth): - raise TypeError("Attempting to store method in field") - - if ootype != typeOf(default): - raise TypeError("Expected type %r for default" % (ootype,)) - - self._fields.update(fields) - if with_default: - self._fields_with_default.extend(fields.items()) - - def _override_default_for_fields(self, fields): - # sanity check - for field in fields: - INST, TYPE = self._superclass._lookup_field(field) - assert TYPE is not None, "Can't find field %s in superclasses" % field - self._overridden_defaults.update(fields) - - def _add_methods(self, methods): - # Note to the unwary: _add_methods adds *methods* whereas - # _add_fields adds *descriptions* of fields. This is obvious - # if you are in the right state of mind (swiss?), but - # certainly not necessarily if not. 
- for name, method in methods.iteritems(): - if self._has_field(name): - raise TypeError("Can't add method %r: field already exists" % name) - if not isinstance(typeOf(method), Meth): - raise TypeError("added methods must be _meths, not %s" % type(method)) - self._methods.update(methods) - - def _init_instance(self, instance): - if self._superclass is not None: - self._superclass._init_instance(instance) - - for name, (ootype, default) in self._fields.iteritems(): - instance.__dict__[name] = enforce(ootype, default) - - for name, (ootype, default) in self._overridden_defaults.iteritems(): - instance.__dict__[name] = enforce(ootype, default) - - def _has_field(self, name): - try: - self._fields[name] - return True - except KeyError: - if self._superclass is None: - return False - - return self._superclass._has_field(name) - - def _field_type(self, name): - try: - return self._fields[name][0] - except KeyError: - if self._superclass is None: - raise TypeError("No field names %r" % name) - - return self._superclass._field_type(name) - - _check_field = _field_type - - def _lookup_field(self, name): - field = self._fields.get(name) - - if field is None and self._superclass is not None: - return self._superclass._lookup_field(name) - - try: - return self, field[0] - except TypeError: - return self, None - - def _lookup(self, meth_name): - meth = self._methods.get(meth_name) - - if meth is None and self._superclass is not None: - return self._superclass._lookup(meth_name) - - return self, meth - - def _allfields(self): - if self._superclass is None: - all = {} - else: - all = self._superclass._allfields() - all.update(self._fields) - return all - - def _lookup_graphs(self, meth_name): - _, meth = self._lookup(meth_name) - graphs = set() - if not getattr(meth, 'abstract', False): - graphs.add(meth.graph) - for SUBTYPE in self._subclasses: - graphs.update(SUBTYPE._lookup_graphs(meth_name)) - return graphs - - def _get_fields_with_default(self): - if self._superclass is None: - return self._fields_with_default[:] - return self._superclass._get_fields_with_default() + self._fields_with_default - - def _immutable_field(self, field): - if self._hints.get('immutable'): - return True - if 'immutable_fields' in self._hints: - try: - return self._hints['immutable_fields'].fields[field] - except KeyError: - pass - return False - - -class SpecializableType(OOType): - def _specialize_type(self, TYPE, generic_types): - if isinstance(TYPE, SpecializableType): - res = TYPE._specialize(generic_types) - else: - res = generic_types.get(TYPE, TYPE) - assert res is not None - return res - - def _specialize(self, generic_types): - raise NotImplementedError - -class StaticMethod(SpecializableType): - - def __init__(self, args, result): - self.ARGS = tuple(args) - self.RESULT = result - self._null = _null_static_meth(self) - - def _example(self): - _retval = self.RESULT._example() - return _static_meth(self, _callable=lambda *args: _retval) - - def _defl(self): - return null(self) - - def __repr__(self): - return "<%s(%s, %s)>" % (self.__class__.__name__, list(self.ARGS), self.RESULT) - - __str__ = __repr__ - - def _specialize(self, generic_types): - ARGS = tuple([self._specialize_type(ARG, generic_types) - for ARG in self.ARGS]) - RESULT = self._specialize_type(self.RESULT, generic_types) - return self.__class__(ARGS, RESULT) - - -class Meth(StaticMethod): - - SELFTYPE = None - - def __init__(self, args, result): - StaticMethod.__init__(self, args, result) - - -class BuiltinType(SpecializableType): - - def 
_example(self): - return new(self) - - def _defl(self): - return self._null - - def _get_interp_class(self): - raise NotImplementedError - -class Record(BuiltinType): - - # We try to keep Record as similar to Instance as possible, so backends - # can treat them polymorphically, if they choose to do so. - - def __init__(self, fields, _hints={}): - if isinstance(fields, dict): - fields = fields.items() # random order in that case - self._fields = frozendict() - fields_in_order = [] - for name, ITEMTYPE in fields: - self._fields[name] = ITEMTYPE, ITEMTYPE._defl() - fields_in_order.append(name) - self._fields_in_order = tuple(fields_in_order) - self._null = _null_record(self) - self._hints = frozendict(_hints) - - def _defl(self): - return self._null - - def _get_interp_class(self): - return _record - - def _field_type(self, name): - try: - return self._fields[name][0] - except KeyError: - raise TypeError("No field names %r" % name) - - _check_field = _field_type - - def _lookup(self, meth_name): - return self, None - - def _lookup_field(self, name): - try: - return self, self._field_type(name) - except TypeError: - return self, None - - def __str__(self): - item_str = ["%s: %s" % (str(name), str(self._fields[name][0])) - for name in self._fields_in_order] - return '%s(%s)' % (self.__class__.__name__, ", ".join(item_str)) - -class BuiltinADTType(BuiltinType): - - immutable = False # conservative - - def _setup_methods(self, generic_types, can_raise=[], pure_meth=[]): - methods = {} - for name, meth in self._GENERIC_METHODS.iteritems(): - args = [self._specialize_type(arg, generic_types) for arg in meth.ARGS] - result = self._specialize_type(meth.RESULT, generic_types) - METH = Meth(args, result) - METH.SELFTYPE = self - methods[name] = METH - self._METHODS = frozendict(methods) - self._can_raise = tuple(can_raise) - if pure_meth == 'ALL': - self._pure_meth = tuple(methods.keys()) - else: - self._pure_meth = tuple(pure_meth) - - def _lookup(self, meth_name): - METH = self._METHODS.get(meth_name) - meth = None - if METH is not None: - cls = self._get_interp_class() - can_raise = meth_name in self._can_raise - pure_meth = meth_name in self._pure_meth - meth = _meth(METH, _name=meth_name, - _callable=getattr(cls, meth_name), - _can_raise=can_raise, _pure_meth=pure_meth) - meth._virtual = False - return self, meth - - def _lookup_graphs(self, meth_name): - return set() - - -class AbstractString(BuiltinADTType): - - oopspec_name = 'str' - immutable = True - - def __init__(self): - self._null = _null_string(self) - - generic_types = { self.SELFTYPE_T: self } - self._GENERIC_METHODS = frozendict({ - "ll_hash": Meth([], Signed), - "ll_stritem_nonneg": Meth([Signed], self.CHAR), - "ll_strlen": Meth([], Signed), - "ll_strconcat": Meth([self.SELFTYPE_T], self.SELFTYPE_T), - "ll_streq": Meth([self.SELFTYPE_T], Bool), - "ll_strcmp": Meth([self.SELFTYPE_T], Signed), - "ll_startswith": Meth([self.SELFTYPE_T], Bool), - "ll_startswith_char": Meth([self.CHAR], Bool), - "ll_endswith": Meth([self.SELFTYPE_T], Bool), - "ll_endswith_char": Meth([self.CHAR], Bool), - "ll_find": Meth([self.SELFTYPE_T, Signed, Signed], Signed), - "ll_rfind": Meth([self.SELFTYPE_T, Signed, Signed], Signed), - "ll_count": Meth([self.SELFTYPE_T, Signed, Signed], Signed), - "ll_find_char": Meth([self.CHAR, Signed, Signed], Signed), - "ll_rfind_char": Meth([self.CHAR, Signed, Signed], Signed), - "ll_count_char": Meth([self.CHAR, Signed, Signed], Signed), - "ll_strip": Meth([self.CHAR, Bool, Bool], self.SELFTYPE_T), - "ll_upper": Meth([], 
self.SELFTYPE_T), - "ll_lower": Meth([], self.SELFTYPE_T), - "ll_substring": Meth([Signed, Signed], self.SELFTYPE_T), # ll_substring(start, count) - "ll_split_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! - "ll_rsplit_chr": Meth([self.CHAR, Signed], Array(self.SELFTYPE_T)), # XXX this is not pure! - "ll_contains": Meth([self.CHAR], Bool), - "ll_replace_chr_chr": Meth([self.CHAR, self.CHAR], self.SELFTYPE_T), - }) - self._setup_methods(generic_types, pure_meth='ALL') - - def _example(self): - return self._defl() - - def _get_interp_class(self): - return _string - - def _specialize(self, generic_types): - return self - -# WARNING: the name 'String' is rebound at the end of file -class String(AbstractString): - SELFTYPE_T = object() - CHAR = Char - _name = 'String' - - # TODO: should it return _null or ''? - def _defl(self): - return make_string('') - - def _enforce(self, value): - # XXX share this with Unicode? - TYPE = typeOf(value) - if TYPE == self.CHAR: - return make_string(value) - else: - return BuiltinADTType._enforce(self, value) - - -# WARNING: the name 'Unicode' is rebound at the end of file -class Unicode(AbstractString): - SELFTYPE_T = object() - CHAR = UniChar - _name = 'Unicode' - - # TODO: should it return _null or ''? - def _defl(self): - return make_unicode(u'') - - def _enforce(self, value): - TYPE = typeOf(value) - if TYPE == self.CHAR: - return make_unicode(value) - else: - return BuiltinADTType._enforce(self, value) - - - - -# WARNING: the name 'StringBuilder' is rebound at the end of file -class StringBuilder(BuiltinADTType): - oopspec_name = 'stringbuilder' - - def __init__(self, STRINGTP, CHARTP): - self._null = _null_string_builder(self) - self._GENERIC_METHODS = frozendict({ - "ll_allocate": Meth([Signed], Void), - "ll_append_char": Meth([CHARTP], Void), - "ll_append": Meth([STRINGTP], Void), - "ll_build": Meth([], STRINGTP), - "ll_getlength": Meth([], Signed), - }) - self._setup_methods({}) - - def _defl(self): - return self._null - - def _get_interp_class(self): - return _string_builder - - def _specialize(self, generic_types): - return self - -# WARNING: the name WeakReference is rebound at the end of file -class WeakReference(BuiltinADTType): - def __init__(self): - self._null = _null_weak_reference(self) - self._GENERIC_METHODS = frozendict({ - "ll_set": Meth([ROOT], Void), - "ll_deref": Meth([], ROOT), - }) - self._setup_methods({}) - - def _defl(self): - return self._null - - def _get_interp_class(self): - return _weak_reference - - def _specialize(self, generic_types): - return self - -class List(BuiltinADTType): - # placeholders for types - # make sure that each derived class has his own SELFTYPE_T - # placeholder, because we want backends to distinguish that. - SELFTYPE_T = object() - ITEMTYPE_T = object() - oopspec_name = 'list' - oopspec_new = 'new(0)' - oopspec_new_argnames = () - - def __init__(self, ITEMTYPE=None): - self.ITEM = ITEMTYPE - self._null = _null_list(self) - if ITEMTYPE is not None: - self._init_methods() - - def _init_methods(self): - # This defines the abstract list interface that backends will - # have to map to their native list implementations. - # 'ITEMTYPE_T' is used as a placeholder for indicating - # arguments that should have ITEMTYPE type. 
'SELFTYPE_T' indicates 'self' - - generic_types = { - self.SELFTYPE_T: self, - self.ITEMTYPE_T: self.ITEM, - } - - # the methods are named after the ADT methods of lltypesystem's lists - self._GENERIC_METHODS = frozendict({ - # "name": Meth([ARGUMENT1_TYPE, ARGUMENT2_TYPE, ...], RESULT_TYPE) - "ll_length": Meth([], Signed), - "ll_getitem_fast": Meth([Signed], self.ITEMTYPE_T), - "ll_setitem_fast": Meth([Signed, self.ITEMTYPE_T], Void), - "_ll_resize_ge": Meth([Signed], Void), - "_ll_resize_le": Meth([Signed], Void), - "_ll_resize": Meth([Signed], Void), - "_ll_resize_hint": Meth([Signed], Void), - }) - - self._setup_methods(generic_types) - - # this is the equivalent of the lltypesystem ll_newlist that is - # marked as typeMethod. - @signature(types.any(), types.int(), returns=types.any()) - def ll_newlist(self, length): - from rpython.rtyper.ootypesystem import rlist - return rlist.ll_newlist(self, length) - - # NB: We are expecting Lists of the same ITEMTYPE to compare/hash - # equal. We don't redefine __eq__/__hash__ since the implementations - # from LowLevelType work fine, especially in the face of recursive - # data structures. But it is important to make sure that attributes - # of supposedly equal Lists compare/hash equal. - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, List): - return False - if self.ITEM is None or other.ITEM is None: - return False # behave like a ForwardReference, i.e. compare by identity - return BuiltinADTType.__eq__(self, other) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - if self.ITEM is None: - raise TypeError("Can't hash uninitialized List type.") - return BuiltinADTType.__hash__(self) - - def __str__(self): - return '%s(%s)' % (self.__class__.__name__, - saferecursive(str, "...")(self.ITEM)) - - def _get_interp_class(self): - return _list - - def _specialize(self, generic_types): - ITEMTYPE = self._specialize_type(self.ITEM, generic_types) - return self.__class__(ITEMTYPE) - - def _defl(self): - return self._null - - def _set_itemtype(self, ITEMTYPE): - self.ITEM = ITEMTYPE - self._init_methods() - - def ll_convert_from_array(self, array): - length = array.ll_length() - result = self.ll_newlist(length) - for n in range(length): - result.ll_setitem_fast(n, array.ll_getitem_fast(n)) - return result - -class Array(BuiltinADTType): - # placeholders for types - # make sure that each derived class has his own SELFTYPE_T - # placeholder, because we want backends to distinguish that. - - SELFTYPE_T = object() - ITEMTYPE_T = object() - oopspec_name = 'list' - oopspec_new = 'new(length)' - oopspec_new_argnames = ('length',) - - def __init__(self, ITEMTYPE=None, _hints = {}): - self.ITEM = ITEMTYPE - self._hints = frozendict(_hints) - self._null = _null_array(self) - if ITEMTYPE is not None: - self._init_methods() - - def _init_methods(self): - # This defines the abstract list interface that backends will - # have to map to their native list implementations. - # 'ITEMTYPE_T' is used as a placeholder for indicating - # arguments that should have ITEMTYPE type. 
'SELFTYPE_T' indicates 'self' - - generic_types = { - self.SELFTYPE_T: self, - self.ITEMTYPE_T: self.ITEM, - } - - # the methods are named after the ADT methods of lltypesystem's lists - self._GENERIC_METHODS = frozendict({ - # "name": Meth([ARGUMENT1_TYPE, ARGUMENT2_TYPE, ...], RESULT_TYPE) - "ll_length": Meth([], Signed), - "ll_getitem_fast": Meth([Signed], self.ITEMTYPE_T), - "ll_setitem_fast": Meth([Signed, self.ITEMTYPE_T], Void), - }) - - self._setup_methods(generic_types) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, Array): - return False - if self.ITEM is None or other.ITEM is None: - return False # behave like a ForwardReference, i.e. compare by identity - return BuiltinADTType.__eq__(self, other) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - if self.ITEM is None: - raise TypeError("Can't hash uninitialized List type.") - return BuiltinADTType.__hash__(self) - - def __str__(self): - return '%s(%s)' % (self.__class__.__name__, - saferecursive(str, "...")(self.ITEM)) - - def _get_interp_class(self): - return _array - - def _specialize(self, generic_types): - ITEMTYPE = self._specialize_type(self.ITEM, generic_types) - return self.__class__(ITEMTYPE) - - def _defl(self): - return self._null - - def _example(self): - return oonewarray(self, 1) - - def _set_itemtype(self, ITEMTYPE): - self.ITEM = ITEMTYPE - self._init_methods() - - @signature(types.any(), types.int(), returns=types.any()) - def ll_newlist(self, length): - from rpython.rtyper.ootypesystem import rlist - return rlist.ll_newarray(self, length) - - def ll_convert_from_array(self, array): - return array - - -class Dict(BuiltinADTType): - # placeholders for types - SELFTYPE_T = object() - KEYTYPE_T = object() - VALUETYPE_T = object() - oopspec_name = 'dict' - oopspec_new = 'new()' - oopspec_new_argnames = () - - def __init__(self, KEYTYPE=None, VALUETYPE=None): - self._KEYTYPE = KEYTYPE - self._VALUETYPE = VALUETYPE - self._null = _null_dict(self) - - if self._is_initialized(): - self._init_methods() - - def _is_initialized(self): - return self._KEYTYPE is not None and self._VALUETYPE is not None - - def _init_methods(self): - # XXX clean-up later! Rename _KEYTYPE and _VALUETYPE to KEY and VALUE. - # For now they are just synonyms, please use KEY/VALUE in new code. - self.KEY = self._KEYTYPE - self.VALUE = self._VALUETYPE - - self._generic_types = frozendict({ - self.SELFTYPE_T: self, - self.KEYTYPE_T: self._KEYTYPE, - self.VALUETYPE_T: self._VALUETYPE - }) - - # ll_get() is always used just after a call to ll_contains(), - # always with the same key, so backends can optimize/cache the - # result - self._GENERIC_METHODS = frozendict({ - "ll_length": Meth([], Signed), - "ll_get": Meth([self.KEYTYPE_T], self.VALUETYPE_T), - "ll_set": Meth([self.KEYTYPE_T, self.VALUETYPE_T], Void), - "ll_remove": Meth([self.KEYTYPE_T], Bool), # return False is key was not present - "ll_contains": Meth([self.KEYTYPE_T], Bool), - "ll_clear": Meth([], Void), - "ll_get_items_iterator": Meth([], DictItemsIterator(self.KEYTYPE_T, self.VALUETYPE_T)), - }) - - self._setup_methods(self._generic_types) - - # NB: We are expecting Dicts of the same KEYTYPE, VALUETYPE to - # compare/hash equal. We don't redefine __eq__/__hash__ since the - # implementations from LowLevelType work fine, especially in the - # face of recursive data structures. But it is important to make - # sure that attributes of supposedly equal Dicts compare/hash - # equal. 
- - def __str__(self): - return '%s(%s, %s)' % (self.__class__.__name__, - self._KEYTYPE, saferecursive(str, "...")(self._VALUETYPE)) - - def __eq__(self, other): - if self is other: - return True - if not isinstance(other, Dict): - return False - if not self._is_initialized() or not other._is_initialized(): - return False # behave like a ForwardReference, i.e. compare by identity - return BuiltinADTType.__eq__(self, other) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - if not self._is_initialized(): - raise TypeError("Can't hash uninitialized Dict type.") - return BuiltinADTType.__hash__(self) - - def _get_interp_class(self): - return _dict - - def _specialize(self, generic_types): - KEYTYPE = self._specialize_type(self._KEYTYPE, generic_types) - VALUETYPE = self._specialize_type(self._VALUETYPE, generic_types) - return self.__class__(KEYTYPE, VALUETYPE) - - def _set_types(self, KEYTYPE, VALUETYPE): - self._KEYTYPE = KEYTYPE - self._VALUETYPE = VALUETYPE - self._init_methods() - - -class CustomDict(Dict): - def __init__(self, KEYTYPE=None, VALUETYPE=None): - Dict.__init__(self, KEYTYPE, VALUETYPE) - self._null = _null_custom_dict(self) - - if self._is_initialized(): - self._init_methods() - - def _init_methods(self): - Dict._init_methods(self) - EQ_FUNC = StaticMethod([self.KEYTYPE_T, self.KEYTYPE_T], Bool) - HASH_FUNC = StaticMethod([self.KEYTYPE_T], Signed) - self._GENERIC_METHODS['ll_set_functions'] = Meth([EQ_FUNC, HASH_FUNC], Void) - self._GENERIC_METHODS['ll_copy'] = Meth([], self.SELFTYPE_T) - self._setup_methods(self._generic_types, can_raise=['ll_get', 'll_set', 'll_remove', 'll_contains']) - - def _get_interp_class(self): - return _custom_dict - - -class DictItemsIterator(BuiltinADTType): - SELFTYPE_T = object() - KEYTYPE_T = object() - VALUETYPE_T = object() - - def __init__(self, KEYTYPE, VALUETYPE): - self._KEYTYPE = KEYTYPE - self._VALUETYPE = VALUETYPE - self._null = _null_dict_items_iterator(self) - - generic_types = { - self.SELFTYPE_T: self, - self.KEYTYPE_T: KEYTYPE, - self.VALUETYPE_T: VALUETYPE - } - - # Dictionaries are not allowed to be changed during an - # iteration. The ll_go_next method should check this condition - # and raise RuntimeError in that case. 
- self._GENERIC_METHODS = frozendict({ - "ll_go_next": Meth([], Bool), # move forward; return False is there is no more data available - "ll_current_key": Meth([], self.KEYTYPE_T), - "ll_current_value": Meth([], self.VALUETYPE_T), - }) - self._setup_methods(generic_types, can_raise=['ll_go_next']) - - def __str__(self): - return '%s%s' % (self.__class__.__name__, - saferecursive(str, "(...)")((self._KEYTYPE, self._VALUETYPE))) - - def _get_interp_class(self): - return _dict_items_iterator - - def _specialize(self, generic_types): - KEYTYPE = self._specialize_type(self._KEYTYPE, generic_types) - VALUETYPE = self._specialize_type(self._VALUETYPE, generic_types) - return self.__class__(KEYTYPE, VALUETYPE) - -# ____________________________________________________________ - -class _object(object): - - def __init__(self, obj): - self._TYPE = Object - assert obj is None or obj, 'Cannot create _object of a null value, use make_object() instead' - self.obj = obj - - def __nonzero__(self): - return self.obj is not None - - def __eq__(self, other): - if not isinstance(other, _object): - raise TypeError("comparing an _object with %r" % other) - if self.obj is None: - return other.obj is None - elif other.obj is None: - return self.obj is None - else: - return self.obj.__class__ == other.obj.__class__ and \ - self.obj == other.obj - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash(self.obj) - - def _identityhash(self): - try: - return self.obj._identityhash() - except AttributeError: - return hash(self.obj) - - def _cast_to_object(self): - return self - - def _cast_to(self, EXPECTED_TYPE): - if self.obj is None: - return null(EXPECTED_TYPE) - elif EXPECTED_TYPE is Object: - return self - elif isinstance(EXPECTED_TYPE, Instance): - return oodowncast(EXPECTED_TYPE, self.obj) - else: - T = typeOf(self.obj) - if T != EXPECTED_TYPE: - raise RuntimeError("Invalid cast: %s --> %s" % (T, EXPECTED_TYPE)) - return self.obj - - -class _class(object): - _TYPE = Class - - def __init__(self, INSTANCE): - self._INSTANCE = INSTANCE - - def _cast_to_object(self): - return make_object(self) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self._INSTANCE) - - def __nonzero__(self): - return self._INSTANCE is not None - -nullruntimeclass = _class(None) -Class._null = nullruntimeclass - -class _instance(object): - - def __init__(self, INSTANCE): - self.__dict__["_TYPE"] = INSTANCE - INSTANCE._init_instance(self) - - def __repr__(self): - return '<%s>' % (self,) - - def __str__(self): - return '%r inst at 0x%x' % (self._TYPE._name, uid(self)) - - def __getattr__(self, name): - DEFINST, meth = self._TYPE._lookup(name) - if meth is not None: - return meth._bound(DEFINST, self) - - self._TYPE._check_field(name) - - return self.__dict__[name] - - def __setattr__(self, name, value): - self.__getattr__(name) - - FLDTYPE = self._TYPE._field_type(name) - try: - val = enforce(FLDTYPE, value) - except TypeError: - raise TypeError("Expected type %r" % FLDTYPE) - - self.__dict__[name] = value - - def __nonzero__(self): - return True # better be explicit -- overridden in _null_instance - - def __eq__(self, other): - if not isinstance(other, _instance): - raise TypeError("comparing an _instance with %r" % (other,)) - return self is other # same comment as __nonzero__ - - def __ne__(self, other): - return not (self == other) - - def _instanceof(self, INSTANCE): - assert isinstance(INSTANCE, Instance) - return bool(self) and isSubclass(self._TYPE, INSTANCE) - - def 
_classof(self): - assert bool(self) - return runtimeClass(self._TYPE) - - def _upcast(self, INSTANCE): - assert instanceof(self, INSTANCE) - return self - - _enforce = _upcast - - def _downcast(self, INSTANCE): - assert instanceof(self, INSTANCE) - return self - - def _identityhash(self): - return hash(self) - - def _cast_to_object(self): - return make_object(ooupcast(ROOT, self)) - - -def _null_mixin(klass): - class mixin(object): - - def __str__(self): - try: - name = self._TYPE._name - except AttributeError: - name = self._TYPE - return '%r null inst' % (name,) - - def __getattribute__(self, name): - if name.startswith("_"): - return object.__getattribute__(self, name) - - raise RuntimeError("Access to field in null object") - - def __setattr__(self, name, value): - klass.__setattr__(self, name, value) - - raise RuntimeError("Assignment to field in null object") - - def __nonzero__(self): - return False - - def __eq__(self, other): - if not isinstance(other, klass): - raise TypeError("comparing an %s with %r" % (klass.__name__, other)) - return not other - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash(self._TYPE) - return mixin - -class _null_instance(_null_mixin(_instance), _instance): - - def __init__(self, INSTANCE): - self.__dict__["_TYPE"] = INSTANCE - - -class _view(object): - - def __init__(self, INSTANCE, inst): - self.__dict__['_TYPE'] = INSTANCE - assert isinstance(inst, (_instance, _record)) - assert isinstance(inst._TYPE, Record) or isSubclass(inst._TYPE, INSTANCE) - self.__dict__['_inst'] = inst - - def __repr__(self): - if self._TYPE == self._inst._TYPE: - return repr(self._inst) - else: - return '<%r view of %s>' % (self._TYPE._name, self._inst) - - def __ne__(self, other): - return not (self == other) - - def __eq__(self, other): - if not isinstance(other, _view): - return False - a = self._inst - b = other._inst - return a.__class__ == b.__class__ and a == b - - def __hash__(self): - return hash(self._inst) + 1 - - def __nonzero__(self): - return bool(self._inst) - - def __setattr__(self, name, value): - self._TYPE._check_field(name) - setattr(self._inst, name, value) - - def __getattr__(self, name): - _, meth = self._TYPE._lookup(name) - meth or self._TYPE._check_field(name) - res = getattr(self._inst, name) - if meth: - assert isinstance(res, _bound_meth) - return res.__class__(res.DEFINST, _view(res.DEFINST, res.inst), res.meth) - return res - - def _become(self, other): - assert self._TYPE == other._TYPE - assert isinstance(other, _view) - self.__dict__['_inst'] = other._inst - - def _instanceof(self, INSTANCE): - return self._inst._instanceof(INSTANCE) - - def _classof(self): - return self._inst._classof() - - def _upcast(self, INSTANCE): - assert isSubclass(self._TYPE, INSTANCE) - return _view(INSTANCE, self._inst) - - _enforce = _upcast - - def _downcast(self, INSTANCE): - if not self._inst: - assert isSubclass(INSTANCE, self._TYPE) or isSubclass(self._TYPE, INSTANCE) - return null(INSTANCE) - assert isSubclass(INSTANCE, self._TYPE) - return _view(INSTANCE, self._inst) - - def _identityhash(self): - return self._inst._identityhash() - - def _cast_to_object(self): - return make_object(ooupcast(ROOT, self)) - -if STATICNESS: - instance_impl = _view -else: - instance_impl = _instance - -def make_string(value): - assert isinstance(value, str) - return _string(String, value) - -def make_unicode(value): - assert isinstance(value, unicode) - return _string(Unicode, value) - -def make_instance(INSTANCE): - inst = 
_instance(INSTANCE) - if STATICNESS: - inst = _view(INSTANCE, inst) - return inst - -def make_null_instance(INSTANCE): - inst = _null_instance(INSTANCE) - if STATICNESS: - inst = _view(INSTANCE, inst) - return inst - -def make_object(llvalue): - if llvalue: - return _object(llvalue) - else: - return NULL - -class _callable(object): - - def __init__(self, TYPE, **attrs): - self._TYPE = TYPE - self._name = "?" - self._callable = None - self.__dict__.update(attrs) - - def _checkargs(self, args, check_callable=True): - if len(args) != len(self._TYPE.ARGS): - raise TypeError,"calling %r with wrong argument number: %r" % (self._TYPE, args) - - checked_args = [] - for a, ARG in zip(args, self._TYPE.ARGS): - try: - if ARG is not Void: - a = enforce(ARG, a) - except TypeError: - raise TypeError,"calling %r with wrong argument types: %r" % (self._TYPE, args) - checked_args.append(a) - if not check_callable: - return checked_args - callb = self._callable - if callb is None: - raise RuntimeError,"calling undefined or null function" - return callb, checked_args - - def __eq__(self, other): - return (self.__class__ is other.__class__ and - self.__dict__ == other.__dict__) - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash(frozendict(self.__dict__)) - - def _cast_to_object(self): - return make_object(self) - - -class _static_meth(_callable): - allowed_types = (StaticMethod,) - - def __init__(self, STATICMETHOD, **attrs): - assert isinstance(STATICMETHOD, self.allowed_types) - _callable.__init__(self, STATICMETHOD, **attrs) - - def __call__(self, *args): - callb, checked_args = self._checkargs(args) - return callb(*checked_args) - - def __repr__(self): - return 'sm %s' % self._name - - def _as_ptr(self): - return self - -class _null_static_meth(_null_mixin(_static_meth), _static_meth): - - def __init__(self, STATICMETHOD): - self.__dict__["_TYPE"] = STATICMETHOD - self.__dict__["_name"] = "? 
(null)" - self.__dict__["_callable"] = None - - -class _forward_static_meth(_static_meth): - allowed_types = (StaticMethod, ForwardReference) - - def __eq__(self, other): - return self is other - - def __hash__(self): - return id(self) - - def _become(self, other): - assert isinstance(other, _static_meth) - self.__dict__ = other.__dict__ - -class _bound_meth(object): - def __init__(self, DEFINST, inst, meth): - self.DEFINST = DEFINST - self.inst = inst - self.meth = meth - - def __call__(self, *args): - callb, checked_args = self.meth._checkargs(args) - return callb(self.inst, *checked_args) - - def _cast_to_object(self): - return make_object(self) - - -class _meth(_callable): - _bound_class = _bound_meth - - def __init__(self, METHOD, **attrs): - assert isinstance(METHOD, Meth) - _callable.__init__(self, METHOD, **attrs) - - def _bound(self, DEFINST, inst): - TYPE = typeOf(inst) - assert isinstance(TYPE, (Instance, BuiltinType)) - return self._bound_class(DEFINST, inst, self) - - -class _overloaded_meth_desc: - def __init__(self, name, TYPE): - self.name = name - self.TYPE = TYPE - - -class _overloaded_bound_meth(_bound_meth): - def __init__(self, DEFINST, inst, meth): - self.DEFINST = DEFINST - self.inst = inst - self.meth = meth - - def _get_bound_meth(self, *args): - ARGS = tuple([typeOf(arg) for arg in args]) - meth = self.meth._resolver.resolve(ARGS) - assert isinstance(meth, _meth) - return meth._bound(self.DEFINST, self.inst) - - def __call__(self, *args): - bound_meth = self._get_bound_meth(*args) - return bound_meth(*args) - - -class OverloadingResolver(object): - - def __init__(self, overloadings): - self.overloadings = overloadings - self._check_overloadings() - - def _check_overloadings(self): - signatures = py.builtin.set() - for meth in self.overloadings: - ARGS = meth._TYPE.ARGS - if ARGS in signatures: - # XXX Conflict on 'Signed' vs 'SignedLongLong' on win64. - # XXX note that this partially works if this error is ignored. 
- raise TypeError, 'Bad overloading' - signatures.add(ARGS) - - def annotate(self, args_s): - ARGS = tuple([self.annotation_to_lltype(arg_s) for arg_s in args_s]) - METH = self.resolve(ARGS)._TYPE - return self.lltype_to_annotation(METH.RESULT) - - def resolve(self, ARGS): - # this overloading resolution algorithm is quite simple: - # 1) if there is an exact match between ARGS and meth.ARGS, return meth - # 2) if there is *only one* meth such as ARGS can be converted - # to meth.ARGS with one or more upcasts, return meth - # 3) otherwise, fail - matches = [] - for meth in self.overloadings: - METH = meth._TYPE - if METH.ARGS == ARGS: - return meth # case 1 - elif self._check_signature(ARGS, METH.ARGS): - matches.append(meth) - if len(matches) == 1: - return matches[0] - elif len(matches) > 1: - raise TypeError, 'More than one method match, please use explicit casts' - else: - raise TypeError, 'No suitable overloading found for method' - - def _check_signature(self, ARGS1, ARGS2): - if len(ARGS1) != len(ARGS2): - return False - for ARG1, ARG2 in zip(ARGS1, ARGS2): - if not self._can_convert_from_to(ARG1, ARG2): - return False - return True - - def _can_convert_from_to(self, ARG1, ARG2): - if isinstance(ARG1, Instance) and isinstance(ARG2, Instance) and isSubclass(ARG1, ARG2): - return True - else: - return False - - def annotation_to_lltype(cls, ann): - from rpython.annotator import model as annmodel - return annmodel.annotation_to_lltype(ann) - annotation_to_lltype = classmethod(annotation_to_lltype) - - def lltype_to_annotation(cls, TYPE): - from rpython.annotator import model as annmodel - return annmodel.lltype_to_annotation(TYPE) - lltype_to_annotation = classmethod(lltype_to_annotation) - - -class _overloaded_meth(_meth): - _bound_class = _overloaded_bound_meth - _desc_class = _overloaded_meth_desc - - def __init__(self, *overloadings, **attrs): - assert '_callable' not in attrs - resolver = attrs.pop('resolver', OverloadingResolver) - _meth.__init__(self, Meth([], Void), _callable=None, **attrs) # use a fake method type - self._resolver = resolver(overloadings) - - def _get_desc(self, name, ARGS): - meth = self._resolver.resolve(ARGS) - return _overloaded_meth_desc(name, meth._TYPE) - - -class _builtin_type(object): - def __getattribute__(self, name): - TYPE = object.__getattribute__(self, "_TYPE") - _, meth = TYPE._lookup(name) - if meth is not None: - res = meth._bound(TYPE, self) - res._name = name - return res - - return object.__getattribute__(self, name) - - def _cast_to_object(self): - return make_object(self) - - def _identityhash(self): - return object.__hash__(self) - -class _string(_builtin_type): - - def __init__(self, STRING, value = ''): - self._str = value - self._TYPE = STRING - - def __hash__(self): - return hash(self._str) - - def __cmp__(self, other): - return cmp(self._str, other._str) - - def __repr__(self): - return 'ootype._string(value=%r)' % self._str - - def make_string(self, value): - if self._TYPE is String: - return make_string(value) - elif self._TYPE is Unicode: - return make_unicode(value) - else: - assert False, 'Unknown type %s' % self._TYPE - - def ll_hash(self): - # NOT_RPYTHON - # hopefully, ll_hash() should not be called on NULL - assert self._str is not None - return objectmodel._hash_string(self._str) - - def ll_stritem_nonneg(self, i): - # NOT_RPYTHON - s = self._str - assert 0 <= i < len(s) - return s[i] - - def ll_strlen(self): - # NOT_RPYTHON - return len(self._str) - - def ll_strconcat(self, s): - # NOT_RPYTHON - return 
self.make_string(self._str + s._str) - - def ll_streq(self, s): - # NOT_RPYTON - return self._str == s._str - - def ll_strcmp(self, s): - # NOT_RPYTHON - return cmp(self._str, s._str) - - def ll_startswith(self, s): - # NOT_RPYTHON - return self._str.startswith(s._str) - - def ll_startswith_char(self, s): - # NOT_RPYTHON - return self._str.startswith(s) - - def ll_endswith(self, s): - # NOT_RPYTHON - return self._str.endswith(s._str) - - def ll_endswith_char(self, s): - # NOT_RPYTHON - return self._str.endswith(s) - - def ll_find(self, s, start, end): - # NOT_RPYTHON - if start > len(self._str): # workaround to cope with corner case - return -1 # bugs in CPython 2.4 unicode.find('') - return self._str.find(s._str, start, end) - - def ll_rfind(self, s, start, end): - # NOT_RPYTHON - if start > len(self._str): # workaround to cope with corner case - return -1 # bugs in CPython 2.4 unicode.rfind('') - return self._str.rfind(s._str, start, end) - - def ll_count(self, s, start, end): - # NOT_RPYTHON - return self._str.count(s._str, start, end) - - def ll_find_char(self, ch, start, end): - # NOT_RPYTHON - return self._str.find(ch, start, end) - - def ll_rfind_char(self, ch, start, end): - # NOT_RPYTHON - return self._str.rfind(ch, start, end) - - def ll_count_char(self, ch, start, end): - # NOT_RPYTHON - return self._str.count(ch, start, end) - - def ll_strip(self, ch, left, right): - # NOT_RPYTHON - s = self._str - if left: - s = s.lstrip(ch) - if right: - s = s.rstrip(ch) - return self.make_string(s) - - def ll_upper(self): - # NOT_RPYTHON - return self.make_string(self._str.upper()) - - def ll_lower(self): - # NOT_RPYTHON - return self.make_string(self._str.lower()) - - def ll_substring(self, start, count): - # NOT_RPYTHON - return self.make_string(self._str[start:start+count]) - - def ll_split_chr(self, ch, max): - # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.split(ch, max)] - res = _array(Array(self._TYPE), len(l)) - res._array[:] = l - return res - - def ll_rsplit_chr(self, ch, max): - # NOT_RPYTHON - l = [self.make_string(s) for s in self._str.rsplit(ch, max)] - res = _array(Array(self._TYPE), len(l)) - res._array[:] = l - return res - - def ll_contains(self, ch): - # NOT_RPYTHON - return ch in self._str - - def ll_replace_chr_chr(self, ch1, ch2): - # NOT_RPYTHON - return self.make_string(self._str.replace(ch1, ch2)) - -class _null_string(_null_mixin(_string), _string): - def __init__(self, STRING): - self.__dict__["_TYPE"] = STRING - self.__dict__["_str"] = None - -class _string_builder(_builtin_type): - def __init__(self, STRING_BUILDER): - self._TYPE = STRING_BUILDER - self._buf = [] - - def ll_allocate(self, n): - assert isinstance(n, int) - assert n >= 0 - # do nothing - - def ll_append_char(self, ch): - assert isinstance(ch, basestring) and len(ch) == 1 - self._buf.append(ch) - - def ll_append(self, s): - assert isinstance(s, _string) - self._buf.append(s._str) - - def ll_build(self): - if self._TYPE is StringBuilder: - return make_string(''.join(self._buf)) - else: - return make_unicode(u''.join(self._buf)) - - def ll_getlength(self): - return self.ll_build().ll_strlen() - -class _null_string_builder(_null_mixin(_string_builder), _string_builder): - def __init__(self, STRING_BUILDER): - self.__dict__["_TYPE"] = STRING_BUILDER - -import weakref - -class _weak_reference(_builtin_type): - def __init__(self, WEAK_REFERENCE): - self._TYPE = WEAK_REFERENCE - self._ref = None - - def _unwrap_view(self, obj): - # we can't store directly the view inside the weakref because - 
# the view can be a temp object that is not referenced - # anywhere else. - while isinstance(obj, _view): - obj = obj._inst - return obj - - def ll_set(self, target): - assert isinstance(typeOf(target), Instance) - target = self._unwrap_view(target) - self._ref = weakref.ref(target) - - def ll_deref(self): - if self._ref is None: - return null(ROOT) - result = self._ref() - if result is None: - return null(ROOT) - return _view(ROOT, result) - -class _null_weak_reference(_null_mixin(_weak_reference), _weak_reference): - def __init__(self, WEAK_REFERENCE): - self.__dict__["_TYPE"] = WEAK_REFERENCE - - - -class _list(_builtin_type): - def __init__(self, LIST): - self._TYPE = LIST - self._list = [] - - # The following are implementations of the abstract list interface for - # use by the llinterpreter and ootype tests. There are NOT_RPYTHON - # because the annotator is not supposed to follow them. - - def ll_length(self): - # NOT_RPYTHON - return len(self._list) - - def _ll_resize_ge(self, length): - # NOT_RPYTHON - if len(self._list) < length: - diff = length - len(self._list) - self._list += [self._TYPE.ITEM._defl()] * diff - assert len(self._list) >= length - - def _ll_resize_le(self, length): - # NOT_RPYTHON - if length < len(self._list): - del self._list[length:] - assert len(self._list) <= length - - def _ll_resize(self, length): - # NOT_RPYTHON - if length > len(self._list): - self._ll_resize_ge(length) - elif length < len(self._list): - self._ll_resize_le(length) From noreply at buildbot.pypy.org Wed Jul 24 16:04:15 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 24 Jul 2013 16:04:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: better log output Message-ID: <20130724140416.00E791C14BB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65602:ae2fb7071cdd Date: 2013-07-24 10:08 +0200 http://bitbucket.org/pypy/pypy/changeset/ae2fb7071cdd/ Log: better log output diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -101,6 +101,8 @@ rop.MARK_OPAQUE_PTR, rop.JIT_DEBUG, rop.KEEPALIVE, + rop.QUASIIMMUT_FIELD, + rop.RECORD_KNOWN_CLASS, ): self.newops.append(op) continue diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -533,8 +533,7 @@ operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) if logger: - logger.log_loop(inputargs, operations, -2, "rewritten", - name=loopname) + logger.log_loop(inputargs, operations, -3, "rewritten") looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -21,6 +21,10 @@ debug_start("jit-log-compiling-loop") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-compiling-loop") + elif number == -3: + debug_start("jit-log-rewritten-loop") + logops = self._log_operations(inputargs, operations, ops_offset) + debug_stop("jit-log-rewritten-loop") else: debug_start("jit-log-opt-loop") debug_print("# Loop", number, '(%s)' % name, ":", type, From noreply at buildbot.pypy.org Wed Jul 24 16:04:17 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 24 Jul 2013 16:04:17 +0200 
(CEST) Subject: [pypy-commit] pypy stmgc-c4: little improvement to register usage Message-ID: <20130724140417.3ECCF1C14BB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65603:9187e5809794 Date: 2013-07-24 14:06 +0200 http://bitbucket.org/pypy/pypy/changeset/9187e5809794/ Log: little improvement to register usage diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -437,8 +437,8 @@ # new addr in eax, save to now unused arg if for_frame: # ||retadr|x||x|x||xmm0|x||rax|x|| - mc.PUSH_r(eax.value) - # ||retadr|x||x|x||xmm0|x||rax|x||result| + # directly move to rbp + mc.MOV_rr(ebp.value, eax.value) elif IS_X86_32: mc.MOV_sr(3 * WORD, eax.value) # ||val|retadr|x|val|| @@ -473,20 +473,15 @@ else: if IS_X86_32: mc.MOV_rs(edx.value, 5 * WORD) - # ||retadr|x||x|x||xmm0|x||rax|x||result| - mc.MOVSD_xs(xmm0.value, 4 * WORD) - mc.MOV_rs(eax.value, 2 * WORD) # restore + # ||retadr|x||x|x||xmm0|x||rax|x|| + mc.MOVSD_xs(xmm0.value, 3 * WORD) + mc.MOV_rs(eax.value, WORD) # restore self._restore_exception(mc, exc0, exc1) - mc.MOV(exc0, RawEspLoc(WORD * 6, REF)) - mc.MOV(exc1, RawEspLoc(WORD * 7, INT)) - - if IS_X86_32: - mc.POP_r(ecx.value) # return value - else: - mc.POP_r(edi.value) # return value + mc.MOV(exc0, RawEspLoc(WORD * 5, REF)) + mc.MOV(exc1, RawEspLoc(WORD * 6, INT)) mc.LEA_rs(esp.value, 7 * WORD) - + # retval already in ebp mc.RET() rawstart = mc.materialize(self.cpu.asmmemmgr, []) @@ -521,9 +516,9 @@ clt.allgcrefs = [] clt.frame_info.clear() # for now - if log: - operations = self._inject_debugging_code(looptoken, operations, - 'e', looptoken.number) + # if log: + # operations = self._inject_debugging_code(looptoken, operations, + # 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -582,9 +577,9 @@ self.setup(original_loop_token) descr_number = compute_unique_id(faildescr) - if log: - operations = self._inject_debugging_code(faildescr, operations, - 'b', descr_number) + # if log: + # operations = self._inject_debugging_code(faildescr, operations, + # 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) @@ -838,10 +833,17 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop mc.MOV_rm(ebx.value, (X86_64_SCRATCH_REG.value, 0)) # MOV ebx, [r11] @@ -2160,7 +2162,7 @@ assert self.cpu.gc_ll_descr.stm from rpython.jit.backend.llsupport.gc import STMBarrierDescr assert isinstance(descr, STMBarrierDescr) - assert descr.returns_modified_object + assert descr.returns_modified_object loc_base = arglocs[0] assert isinstance(loc_base, RegLoc) # Write only a CALL to the helper prepared in advance, passing it as @@ -2182,11 +2184,8 @@ mc.CALL(imm(func)) # get result: if is_frame: - # result in register: - if IS_X86_32: - mc.MOV_rr(loc_base.value, ecx.value) - else: - mc.MOV_rr(loc_base.value, edi.value) + # result already written back to ebp + assert loc_base is ebp else: # result where argument was: mc.POP_r(loc_base.value) From noreply at buildbot.pypy.org Wed Jul 24 16:04:23 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 24 Jul 2013 16:04:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: attempt to use stmtlocal.py for stm_shadowstack Message-ID: <20130724140423.344FA1C14BB@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65604:709074b08a49 Date: 2013-07-24 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/709074b08a49/ Log: attempt to use stmtlocal.py for stm_shadowstack diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -34,6 +34,7 @@ from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id +from rpython.jit.backend.x86 import stmtlocal class Assembler386(BaseAssembler): @@ -837,13 +838,13 @@ that gives the address of the stack top. If this integer doesn't fit in 32 bits, it will be loaded in r11. """ - rst = gcrootmap.get_root_stack_top_addr() + rst = self._get_root_stack_top_addr() + if rx86.fits_in_32bits(rst): + if gcrootmap.is_stm: + stmtlocal.tl_segment_prefix(mc) mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] else: - # The integer 'rst' doesn't fit in 32 bits, so we know that - # _load_shadowstack_top_in_ebx() above loaded it in r11. - # Reuse it. Be careful not to overwrite r11 in the middle! mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop mc.MOV_rm(ebx.value, (X86_64_SCRATCH_REG.value, 0)) # MOV ebx, [r11] @@ -854,15 +855,24 @@ rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp self.mc.ADD_ri(ebx.value, WORD) + if rx86.fits_in_32bits(rst): + if gcrootmap.is_stm: + stmtlocal.tl_segment_prefix(self.mc) self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
            self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx

     def _call_footer_shadowstack(self, gcrootmap):
-        rst = gcrootmap.get_root_stack_top_addr()
+        rst = self._get_root_stack_top_addr()
+
         if rx86.fits_in_32bits(rst):
+            if gcrootmap.is_stm:
+                stmtlocal.tl_segment_prefix(self.mc)
             self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD
         else:
             self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop
@@ -1172,15 +1182,30 @@
         cb = callbuilder.CallBuilder(self, fnloc, arglocs)
         cb.emit_no_collect()

+    def _get_root_stack_top_addr(self):
+        gcrootmap = self.cpu.gc_ll_descr.gcrootmap
+
+        rst = gcrootmap.get_root_stack_top_addr()
+        if gcrootmap.is_stm:
+            rst = rst - stmtlocal.threadlocal_base()
+            assert rst > 0
+            assert rx86.fits_in_32bits(rst)
+        return rst
+
     def _reload_frame_if_necessary(self, mc, align_stack=False):
         gc_ll_descr = self.cpu.gc_ll_descr
         gcrootmap = gc_ll_descr.gcrootmap
         if gcrootmap and gcrootmap.is_shadow_stack:
-            rst = gcrootmap.get_root_stack_top_addr()
+            rst = self._get_root_stack_top_addr()
+
+            if gcrootmap.is_stm:
+                stmtlocal.tl_segment_prefix(mc)
             mc.MOV(ecx, heap(rst))
             mc.MOV(ebp, mem(ecx, -WORD))
-
+        # if gcrootmap and gcrootmap.is_stm:
+
+
         if not hasattr(gc_ll_descr, 'P2Wdescr'):
             raise Exception("unreachable code")
         wbdescr = gc_ll_descr.P2Wdescr

From noreply at buildbot.pypy.org Wed Jul 24 16:04:36 2013
From: noreply at buildbot.pypy.org (fijal)
Date: Wed, 24 Jul 2013 16:04:36 +0200 (CEST)
Subject: [pypy-commit] pypy fast-slowpath: be clear about conditional_call limitations. Also try to support 32bit (untested
Message-ID: <20130724140436.254141C14BB@cobra.cs.uni-duesseldorf.de>

Author: Maciej Fijalkowski
Branch: fast-slowpath
Changeset: r65605:26be63cd3453
Date: 2013-07-24 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/26be63cd3453/

Log: be clear about conditional_call limitations.
	Also try to support 32bit (untested so far)
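Editor's note (not part of the archived message): the jtransform hunk below makes the JIT refuse a jit_conditional_call that passes float arguments or more than four call arguments. For illustration only, here is a minimal RPython sketch of a call site that stays within that limit, assuming the rpython.rlib.jit.conditional_call(condition, function, *args) helper that this branch is wiring up; the Counter class and the _hit_limit/add helpers are invented names, not code from the commit.

    from rpython.rlib import jit

    class Counter(object):
        def __init__(self, limit):
            self.total = 0
            self.limit = limit
            self.overflowed = False

    def _hit_limit(counter):
        # Slow path: only executed when the condition is true, so the
        # JIT can emit one conditional CALL instead of a guard plus a
        # separately compiled bridge.
        counter.overflowed = True

    def add(counter, value):
        counter.total += value
        # One condition plus a single pointer argument: well inside the
        # "no floats, at most 4 arguments" limit enforced below in
        # rewrite_op_jit_conditional_call().
        jit.conditional_call(counter.total > counter.limit,
                             _hit_limit, counter)

Untranslated, conditional_call simply calls the function when the condition is true; the point of the restriction in the hunk is that the translated fast path must fit the register-argument calling convention of the slowpath stub.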

diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -155,11 +155,19 @@
         """
         mc = codebuf.MachineCodeBlockWrapper()
         self._push_all_regs_to_frame(mc, [], supports_floats, callee_only)
-        mc.SUB(esp, imm(WORD))
+        if self.cpu.IS_X86_64:
+            mc.SUB(esp, imm(WORD))
+        else:
+            # we want space for 3 arguments + call + alignment
+            # the caller is responsible for putting arguments in the right spot
+            mc.SUB(esp, imm(WORD * 7))
         self.set_extra_stack_depth(mc, 2 * WORD)
         # args are in their respective positions
         mc.CALL(eax)
-        mc.ADD(esp, imm(WORD))
+        if self.cpu.IS_X86_64:
+            mc.ADD(esp, imm(WORD))
+        else:
+            mc.ADD(esp, imm(WORD * 7))
         self.set_extra_stack_depth(mc, 0)
         self._reload_frame_if_necessary(mc, align_stack=True)
         self._pop_all_regs_from_frame(mc, [], supports_floats,
@@ -2144,7 +2152,7 @@
     def label(self):
         self._check_frame_depth_debug(self.mc)

-    def cond_call(self, op, gcmap, cond_loc, call_loc):
+    def cond_call(self, op, gcmap, cond_loc, call_loc, arglocs):
         self.mc.TEST(cond_loc, cond_loc)
         self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later
         jmp_adr = self.mc.get_relative_pos()
@@ -2160,6 +2168,12 @@
         if self._regalloc.xrm.reg_bindings:
             floats = True
         cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only]
+        if self.cpu.IS_X86_32:
+            p = -7 * WORD
+            for i in range(len(arglocs) - 1, -1, -1):
+                loc = arglocs[i]
+                self.mc.MOV(RawEspLoc(p), loc)
+                p += WORD
         self.mc.CALL(imm(cond_call_adr))
         self.pop_gcmap(self.mc)
         # never any result value
diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
--- a/rpython/jit/backend/x86/regalloc.py
+++ b/rpython/jit/backend/x86/regalloc.py
@@ -809,12 +809,18 @@
         imm = self.rm.convert_to_imm(v)
         self.assembler.regalloc_mov(imm, eax)
         args_so_far = [tmpbox]
+        locs = []
         for i in range(2, len(args)):
-            reg = self.rm.register_arguments[i - 2]
-            self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg)
+            if self.cpu.IS_X86_64:
+                reg = self.rm.register_arguments[i - 2]
+                self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg)
+            else:
+                loc = self.make_sure_var_in_reg(args[i], args_so_far)
+                locs.append(loc)
            args_so_far.append(args[i])
         loc_cond = self.make_sure_var_in_reg(args[0], args)
-        self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax)
+        self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax,
+                                 locs)
         self.rm.possibly_free_var(tmpbox)

     def consider_call_malloc_nursery(self, op):
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1355,6 +1355,13 @@
         return getattr(self, 'handle_jit_marker__%s' % key)(op, jitdriver)

     def rewrite_op_jit_conditional_call(self, op):
+        have_floats = False
+        for arg in op.args:
+            if getkind(arg.concretetype) == 'float':
+                have_floats = True
+                break
+        if len(op.args) > 4 + 2 or have_floats:
+            raise Exception("Conditional call does not support floats or more than 4 arguments")
        callop = SpaceOperation('direct_call', op.args[1:], op.result)
        calldescr = self.callcontrol.getcalldescr(callop)
        assert not calldescr.get_extra_info().check_forces_virtual_or_virtualizable()
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -350,7 +350,7 @@
'lllong_lshift': LLOp(canfold=True), # args (r_longlonglong, int) 'lllong_rshift': LLOp(canfold=True), # args (r_longlonglong, int) 'lllong_xor': LLOp(canfold=True), - + 'cast_primitive': LLOp(canfold=True), 'cast_bool_to_int': LLOp(canfold=True), 'cast_bool_to_uint': LLOp(canfold=True), @@ -457,6 +457,7 @@ 'jit_force_quasi_immutable': LLOp(canrun=True), 'jit_record_known_class' : LLOp(canrun=True), 'jit_ffi_save_result': LLOp(canrun=True), + 'jit_conditional_call': LLOp(), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -29,7 +29,7 @@ __slots__ = """graph db gcpolicy exception_policy more_ll_values - vars all_cached_consts + vars all_cached_consts illtypes functionname blocknum @@ -59,7 +59,7 @@ if isinstance(T, Ptr) and T.TO.__class__ == ForwardReference: continue db.gettype(T) # force the type to be considered by the database - + self.illtypes = None def collect_var_and_types(self): @@ -90,7 +90,7 @@ for cleanupop in exc_cleanup_ops: mix.extend(cleanupop.args) mix.append(cleanupop.result) - + uniquemix = [] seen = identity_dict() for v in mix: @@ -454,6 +454,9 @@ fnexpr = '((%s)%s)' % (cdecl(typename, ''), self.expr(fnaddr)) return self.generic_call(FUNC, fnexpr, op.args[1:], op.result) + def OP_JIT_CONDITIONAL_CALL(self, op): + return '' + # low-level operations def generic_get(self, op, sourceexpr): T = self.lltypemap(op.result) @@ -580,7 +583,7 @@ def OP_PTR_ISZERO(self, op): return '%s = (%s == NULL);' % (self.expr(op.result), self.expr(op.args[0])) - + def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), self.expr(op.args[0]), @@ -627,7 +630,7 @@ ARRAY = self.lltypemap(op.args[0]).TO if ARRAY._hints.get("render_as_void"): return '%s = (char *)%s + %s;' % ( - self.expr(op.result), + self.expr(op.result), self.expr(op.args[0]), self.expr(op.args[1])) else: @@ -652,7 +655,7 @@ def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) typename = self.db.gettype(TYPE) - return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""), + return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""), self.expr(op.args[0])) def OP_SAME_AS(self, op): @@ -711,7 +714,7 @@ val = "(unsigned char)%s" % val elif ORIG is UniChar: val = "(unsigned long)%s" % val - typename = cdecl(self.db.gettype(TYPE), '') + typename = cdecl(self.db.gettype(TYPE), '') return "%(result)s = (%(typename)s)(%(val)s);" % locals() OP_FORCE_CAST = OP_CAST_PRIMITIVE # xxx the same logic works @@ -823,7 +826,7 @@ counter_label+1) counter_label = self.expr(op.args[1]) return 'PYPY_INSTRUMENT_COUNT(%s);' % counter_label - + def OP_IS_EARLY_CONSTANT(self, op): return '%s = 0; /* IS_EARLY_CONSTANT */' % (self.expr(op.result),) From noreply at buildbot.pypy.org Wed Jul 24 16:08:35 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 16:08:35 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix Message-ID: <20130724140835.0505D1C155C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65606:04fbc9dabd84 Date: 2013-07-24 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/04fbc9dabd84/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -155,7 +155,7 @@ """ mc = 
codebuf.MachineCodeBlockWrapper() self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) - if self.cpu.IS_X86_64: + if IS_X86_64: mc.SUB(esp, imm(WORD)) else: # we want space for 3 arguments + call + alignment @@ -164,7 +164,7 @@ self.set_extra_stack_depth(mc, 2 * WORD) # args are in their respective positions mc.CALL(eax) - if self.cpu.IS_X86_64: + if IS_X86_64: mc.ADD(esp, imm(WORD)) else: mc.ADD(esp, imm(WORD * 7)) @@ -2168,7 +2168,7 @@ if self._regalloc.xrm.reg_bindings: floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] - if self.cpu.IS_X86_32: + if IS_X86_32: p = -7 * WORD for i in range(len(arglocs) - 1, -1, -1): loc = arglocs[i] diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -811,7 +811,7 @@ args_so_far = [tmpbox] locs = [] for i in range(2, len(args)): - if self.cpu.IS_X86_64: + if IS_X86_64: reg = self.rm.register_arguments[i - 2] self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) else: From noreply at buildbot.pypy.org Wed Jul 24 16:12:31 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 16:12:31 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130724141231.3A4721C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65607:c844d22e5405 Date: 2013-07-24 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c844d22e5405/ Log: hg merge default diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,60 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. + +Highlights +========== + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. 
_`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,21 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'statvfs_result': 'app_posix.statvfs_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' : 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -54,44 +55,46 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 
'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -168,9 +171,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -178,7 +181,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name @@ -187,7 +190,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -195,7 +198,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,7 +125,12 @@ self.llbox = llbox def descr_getint(self, space): - return space.wrap(jit_hooks.box_getint(self.llbox)) + try: + value = jit_hooks.box_getint(self.llbox) + except NotImplementedError: + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) + return space.wrap(value) @unwrap_spec(no=int) def descr_new_box(space, w_tp, no): @@ -182,7 +187,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +242,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +353,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert 
int_add.num == self.int_add_num + assert int_add.offset == 0 self.on_compile_bridge() expected = ('>' % repr(self.f.func_code)) @@ -160,6 +164,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,227 +1,226 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -from __future__ import absolute_import -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. 
- if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." - + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') + + # Load both with same prefix. + prefix1 = 2**10 * 'a' + + # The host md5 + m1 = md5.md5() + m1.update(prefix1) + m1c = m1.copy() + + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') + + # Update and compare... + for i in range(len(cases)): + message = cases[i][0] + + m1c.update(message) + d1 = m1c.hexdigest() + + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) + + assert d1 == d2 + + +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + def test1(self): - "Test cases with known digest result." 
- + """Test cases with known digest result.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message, expectedResult = cases[i][0], None if len(cases[i]) == 2: expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) + self.print_diff(message, d1, d2, expectedResult) assert res is None + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. + ) - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - def test4(self): - "Test cases with increasingly growing message lengths." - + """Test cases with increasingly growing message lengths.""" i = 0 - while i < 2**5: + while i < 2**5: message = i * 'a' - res = compareImp(message) + res = self.compare(message) if res is not None: d1, d2 = res - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - i = i + 1 + i += 1 - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... - for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -735,6 +735,10 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] @@ -752,6 +756,9 @@ if rx86.fits_in_32bits(rst): self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -154,6 +154,15 @@ else: return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def statvfs(path): + if isinstance(path, str): + return os.statvfs(path) + else: + return os.statvfs(path.as_bytes()) + + @specialize.argtype(0) def unlink(path): if isinstance(path, str): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,6 +1698,18 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) + @registering_if(os, 'fstatvfs') + def register_os_fstatvfs(self): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + + if hasattr(os, 'statvfs'): + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) + + # ------------------------------- os.W* --------------------------------- w_star = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -2,20 +2,22 @@ and os.fstat(). In RPython like in plain Python the stat result can be indexed like a tuple but also exposes the st_xxx attributes. """ -import os, sys + +import os +import sys + from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_with_new_name, func_renamer -from rpython.rtyper import extregistry -from rpython.rtyper.extfunc import register_external, extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize +from rpython.rtyper import extregistry +from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.extfunc import extdef +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.tool import rffi_platform as platform +from rpython.tool.pairtype import pairtype +from rpython.tool.sourcetools import func_renamer from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.annlowlevel import hlstr # Support for float times is here. # - ALL_STAT_FIELDS contains Float fields if the system can retrieve @@ -47,12 +49,26 @@ ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented - ] +] N_INDEXABLE_FIELDS = 10 # For OO backends, expose only the portable fields (the first 10). 
PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + # ____________________________________________________________ # # Annotation support @@ -79,6 +95,7 @@ def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7], st[8], st[9]) + def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) @@ -86,6 +103,26 @@ extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate + +class SomeStatvfsResult(annmodel.SomeObject): + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used + + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.module import r_os_stat + return r_os_stat.StatvfsResultRepr(rtyper) + + def rtyper_makekey_ex(self, rtyper): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return annmodel.lltype_to_annotation(TYPE) + + class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" @@ -94,7 +131,17 @@ name, TYPE = STAT_FIELDS[index] return annmodel.lltype_to_annotation(TYPE) + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return annmodel.lltype_to_annotation(TYPE) + + s_StatResult = SomeStatResult() +s_StatvfsResult = SomeStatvfsResult() + def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" @@ -104,6 +151,11 @@ kwds[name] = tup[N_INDEXABLE_FIELDS + i] return os.stat_result(positional, kwds) + +def make_statvfs_result(tup): + return os.statvfs_result(tup) + + class MakeStatResultEntry(extregistry.ExtRegistryEntry): _about_ = make_stat_result @@ -114,22 +166,33 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.specialize_make_stat_result(hop) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + from rpython.rtyper.module import r_os_stat + return r_os_stat.specialize_make_statvfs_result(hop) + # ____________________________________________________________ # # RFFI support if sys.platform.startswith('win'): _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'unistd.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( # This must be set to 64 on some systems to enable large file support. #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], # ^^^ nowadays it's always set in all C files we produce. 
- includes = INCLUDES + includes=INCLUDES ) if TIMESPEC is not None: @@ -141,7 +204,7 @@ def posix_declaration(try_to_add=None): - global STAT_STRUCT + global STAT_STRUCT, STATVFS_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] if try_to_add: @@ -173,15 +236,17 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + try: - config = platform.configure(CConfig, ignore_errors= - try_to_add is not None) + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) except platform.CompilationError: if try_to_add: return # failed to add this field, give up raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) if try_to_add: STAT_FIELDS.append(try_to_add) @@ -202,6 +267,9 @@ STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] del _name, _TYPE +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + def build_stat_result(st): # only for LL backends @@ -233,6 +301,21 @@ return make_stat_result(result) +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + def register_stat_variant(name, traits): if name != 'fstat': arg_is_path = True @@ -301,6 +384,56 @@ [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) + +def register_statvfs_variant(name, traits): + if name != 'fstatvfs': + arg_is_path = True + s_arg = traits.str0 + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + posix_mystatvfs = rffi.llexternal(name, + [ARG1, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info + ) + + @func_renamer('os_%s_llimpl' % (name,)) + def posix_statvfs_llimpl(arg): + stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') + try: + if arg_is_path: + arg = traits.str2charp(arg) + error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) + if arg_is_path: + traits.free_charp(arg) + if error != 0: + raise OSError(rposix.get_errno(), "os_?statvfs failed") + return build_statvfs_result(stresult) + finally: + lltype.free(stresult, flavor='raw') + + @func_renamer('os_%s_fake' % (name,)) + def posix_fakeimpl(arg): + if s_arg == traits.str0: + arg = hlstr(arg) + st = getattr(os, name)(arg) + fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] + TP = TUPLE_TYPE(fields) + ll_tup = lltype.malloc(TP.TO) + for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): + val = getattr(st, fieldname) + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) + return ll_tup + + return extdef( + [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl + ) + + def make_win32_stat_impl(name, traits): from rpython.rlib import rwin32 from rpython.rtyper.module.ll_win32file import make_win32_traits diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -67,3 +67,52 @@ # no-op conversion from r_StatResult.r_tuple to r_StatResult hop.exception_cannot_occur() return v_result + + +class StatvfsResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_fields = ll_os_stat.STATVFS_FIELDS + + self.statvfs_field_indexes 
= {} + for i, (name, TYPE) in enumerate(self.statvfs_fields): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + for name, TYPE in self.statvfs_fields]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + hop2.forced_opname = 'getitem' + hop2.args_v = [hop2.args_v[0], Constant(index)] + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def specialize_make_statvfs_result(hop): + r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -478,7 +478,7 @@ 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', - 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', + 'cvt', 'ucomi', 'comi', 'subs', 'subp', 'adds', 'addp', 'xorp', 'movap', 'movd', 'movlp', 'movup', 'sqrt', 'rsqrt', 'movhlp', 'movlhp', 'mins', 'minp', 'maxs', 'maxp', 'unpck', 'pxor', 'por', # sse2 'shufps', 'shufpd', @@ -495,13 +495,15 @@ # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers - 'movz', + 'movz', # locked operations should not move GC pointers, at least so far 'lock', 'pause', # non-temporal moves should be reserved for areas containing # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', - ]) + # bit manipulations + 'bextr', + ]) # a partial list is hopefully good enough for now; it's all to support # only one corner case, tested in elf64/track_zero.s @@ -741,7 +743,7 @@ # tail-calls are equivalent to RET for us return InsnRet(self.CALLEE_SAVE_REGISTERS) return InsnStop("jump") - + def register_jump_to(self, label, lastinsn=None): if lastinsn is None: lastinsn = self.insns[-1] @@ -1020,7 +1022,7 @@ visit_movl = visit_mov visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) - + visit_pushq = FunctionGcRootTracker._visit_push visit_addq = FunctionGcRootTracker._visit_add From noreply at buildbot.pypy.org Wed Jul 24 16:14:56 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 16:14:56 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix Message-ID: <20130724141456.EA9191C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65608:67eec70cba8b Date: 2013-07-24 14:20 +0000 http://bitbucket.org/pypy/pypy/changeset/67eec70cba8b/ Log: fix diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- 
a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2288,18 +2288,20 @@ 'calldescr': calldescr}) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, 1.2, 3.4) + f1 = longlong.getfloatstorage(1.2) + f2 = longlong.getfloatstorage(3.4) + frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) assert not called for j in range(5): assert self.cpu.get_int_value(frame, j) == j - assert self.cpu.get_float_value(frame, 6) == 1.2 - assert self.cpu.get_float_value(frame, 7) == 3.4 - frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, 1.2, 3.4) + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 6)) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 7)) == 3.4 + frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, f1, f2) assert called == [tuple(range(1, i + 1))] for j in range(4): assert self.cpu.get_int_value(frame, j + 1) == j + 1 - assert self.cpu.get_float_value(frame, 6) == 1.2 - assert self.cpu.get_float_value(frame, 7) == 3.4 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 6)) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 7)) == 3.4 def test_force_operations_returning_void(self): values = [] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2169,10 +2169,9 @@ floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] if IS_X86_32: - p = -7 * WORD - for i in range(len(arglocs) - 1, -1, -1): - loc = arglocs[i] - self.mc.MOV(RawEspLoc(p), loc) + p = -8 * WORD + for loc in arglocs: + self.mc.MOV(RawEspLoc(p, INT), loc) p += WORD self.mc.CALL(imm(cond_call_adr)) self.pop_gcmap(self.mc) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -89,7 +89,6 @@ _location_code = 's' def __init__(self, value, type): - assert value >= 0 self.value = value self.type = type From noreply at buildbot.pypy.org Wed Jul 24 16:16:24 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Wed, 24 Jul 2013 16:16:24 +0200 (CEST) Subject: [pypy-commit] pypy statvfs_tests: added tests for os.statvfs and os.fstatvfs. Message-ID: <20130724141624.550CC1C0149@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: statvfs_tests Changeset: r65609:71a9eb74dc81 Date: 2013-07-23 22:20 -0400 http://bitbucket.org/pypy/pypy/changeset/71a9eb74dc81/ Log: added tests for os.statvfs and os.fstatvfs. 
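(Illustration, not part of the changeset: the tests below exercise the low-level implementation by comparing its result against the host interpreter's os.statvfs. A minimal plain-Python sketch of the object being compared against -- POSIX-only, since os.statvfs does not exist on Windows:)

    import os

    if hasattr(os, 'statvfs'):
        st = os.statvfs('.')
        # statvfs_result behaves both as a 10-tuple and as an object with
        # named f_* attributes; the ll implementation has to mirror both.
        assert st[0] == st.f_bsize
        free_bytes = st.f_bavail * st.f_frsize
        print('block size %d, %d bytes available' % (st.f_bsize, free_bytes))
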
diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -46,6 +46,26 @@ data = getllimpl(os.getlogin)() assert data == expected +def test_statvfs(): + if not hasattr(os, 'statvfs'): + py.test.skip('posix specific function') + try: + expected = os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + data = getllimpl(os.statvfs)('.') + assert data == expected + +def test_fstatvfs(): + if not hasattr(os, 'fstatvfs'): + py.test.skip('posix specific function') + try: + expected = os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + data = getllimpl(os.fstatvfs)(0) + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') From noreply at buildbot.pypy.org Wed Jul 24 16:16:25 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 24 Jul 2013 16:16:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in andrewsmedina/numpypy/statvfs_tests (pull request #166) Message-ID: <20130724141625.D077F1C0149@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65610:82c39b75f8a5 Date: 2013-07-24 07:15 -0700 http://bitbucket.org/pypy/pypy/changeset/82c39b75f8a5/ Log: Merged in andrewsmedina/numpypy/statvfs_tests (pull request #166) added some tests for os.statvfs and os.fstatvfs diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -46,6 +46,26 @@ data = getllimpl(os.getlogin)() assert data == expected +def test_statvfs(): + if not hasattr(os, 'statvfs'): + py.test.skip('posix specific function') + try: + expected = os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + data = getllimpl(os.statvfs)('.') + assert data == expected + +def test_fstatvfs(): + if not hasattr(os, 'fstatvfs'): + py.test.skip('posix specific function') + try: + expected = os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + data = getllimpl(os.fstatvfs)(0) + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') From noreply at buildbot.pypy.org Wed Jul 24 16:29:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 16:29:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20130724142912.9C74E1C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65611:6f92256ff372 Date: 2013-07-24 16:28 +0200 http://bitbucket.org/pypy/pypy/changeset/6f92256ff372/ Log: Translation fix diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,12 +125,10 @@ self.llbox = llbox def descr_getint(self, space): - try: - value = jit_hooks.box_getint(self.llbox) - except NotImplementedError: + if not jit_hooks.box_isint(self.llbox): raise OperationError(space.w_NotImplementedError, space.wrap("Box has no int value")) - return space.wrap(value) + return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) def descr_new_box(space, w_tp, no): diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -111,6 
+111,11 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) + at register_helper(annmodel.SomeBool()) +def box_isint(llbox): + from rpython.jit.metainterp.history import INT + return _cast_to_box(llbox).type == INT + # ------------------------- stats interface --------------------------- @register_helper(annmodel.SomeBool()) From noreply at buildbot.pypy.org Wed Jul 24 16:49:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 16:49:13 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Tweak the code for the case of a stub in h_original (tested by run #931 Message-ID: <20130724144913.977071C02A1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r430:5c385b2b629a Date: 2013-07-24 16:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/5c385b2b629a/ Log: Tweak the code for the case of a stub in h_original (tested by run #931 of test_more_multi_thread) diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -264,7 +264,10 @@ where backup is stolen and its h-original points to it. */ - assert(stmgc_size(id_copy) == stmgc_size(obj)); + /* id_copy may be a stub, but in this case, as the original, it + should have been allocated with a big enough chunk of memory */ + assert((id_copy->h_tid & GCFLAG_STUB) || + stmgc_size(id_copy) == stmgc_size(obj)); /* prehash may be specific hash value for prebuilts, or 0 */ revision_t prehash = id_copy->h_original; assert(IMPLIES(prehash, id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)); @@ -277,7 +280,8 @@ | GCFLAG_PUBLIC | GCFLAG_HAS_ID | GCFLAG_PRIVATE_FROM_PROTECTED))); id_copy->h_original = prehash; - id_copy->h_tid = old_tid & ~GCFLAG_VISITED; /* will be visited next */ + id_copy->h_tid = old_tid & ~(GCFLAG_VISITED |/* will be visited next */ + GCFLAG_STUB); /* no longer a stub */ dprintf(("copy %p over %p\n", obj, id_copy)); From noreply at buildbot.pypy.org Wed Jul 24 16:56:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 16:56:30 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Expand the explanation Message-ID: <20130724145630.EB5541C3553@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r431:23fd1341aaed Date: 2013-07-24 16:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/23fd1341aaed/ Log: Expand the explanation diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -727,11 +727,12 @@ { int i, status; - // seed changes daily - // a bit pointless for now.. + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. */ default_seed = time(NULL); - default_seed -= (default_seed % (3600 * 24)); - + printf("running with seed=%lld\n", (long long)default_seed); + for (i = 0; i < SHARED_ROOTS; i++) { if (i % 3 == 0) { shared_roots[i] = allocate_pseudoprebuilt_with_hash( From noreply at buildbot.pypy.org Wed Jul 24 17:10:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 17:10:25 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Add a (temporary?) flag for debugging, and found out that sometimes Message-ID: <20130724151025.36ADA1C13EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r432:642c4347c8bf Date: 2013-07-24 17:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/642c4347c8bf/ Log: Add a (temporary?) 
flag for debugging, and found out that sometimes we do get a small stub at the wrong place (gcpage:copy_over_original) diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1120,6 +1120,7 @@ gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB + | GCFLAG_SMALLSTUB | GCFLAG_OLD; stub->h_revision = ((revision_t)L) | 2; diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -266,6 +266,7 @@ /* id_copy may be a stub, but in this case, as the original, it should have been allocated with a big enough chunk of memory */ + assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); assert((id_copy->h_tid & GCFLAG_STUB) || stmgc_size(id_copy) == stmgc_size(obj)); /* prehash may be specific hash value for prebuilts, or 0 */ diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -103,6 +103,7 @@ stub = stm_stub_malloc(sd->foreign_pd, size); stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB + | GCFLAG_SMALLSTUB | GCFLAG_OLD; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { From noreply at buildbot.pypy.org Wed Jul 24 17:15:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 17:15:14 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Bah, no Message-ID: <20130724151514.C84F01C13EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r433:b246a4e9bfb6 Date: 2013-07-24 17:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/b246a4e9bfb6/ Log: Bah, no diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1122,6 +1122,7 @@ | GCFLAG_STUB | GCFLAG_SMALLSTUB | GCFLAG_OLD; + dprintf(("et.c: stm_stub_malloc -> %p\n", stub)); stub->h_revision = ((revision_t)L) | 2; assert(!(L->h_tid & GCFLAG_HAS_ID)); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -73,6 +73,7 @@ static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; +static const revision_t GCFLAG_SMALLSTUB = STM_FIRST_GCFLAG << 12; /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -278,7 +278,7 @@ memcpy(id_copy, obj, stmgc_size(obj)); assert(!((id_copy->h_tid ^ old_tid) & (GCFLAG_BACKUP_COPY //| GCFLAG_STUB, id_copy may be stub - | GCFLAG_PUBLIC | GCFLAG_HAS_ID + | GCFLAG_PUBLIC | GCFLAG_HAS_ID | GCFLAG_SMALLSTUB | GCFLAG_PRIVATE_FROM_PROTECTED))); id_copy->h_original = prehash; id_copy->h_tid = old_tid & ~(GCFLAG_VISITED |/* will be visited next */ diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -103,7 +103,6 @@ stub = stm_stub_malloc(sd->foreign_pd, size); stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB - | GCFLAG_SMALLSTUB | GCFLAG_OLD; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { From noreply at buildbot.pypy.org Wed Jul 24 17:34:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 17:34:50 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Current status: fails here Message-ID: <20130724153450.324801C13EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r434:1cdc31909021 Date: 2013-07-24 17:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/1cdc31909021/ Log: Current status: fails here diff --git a/c4/gcpage.c b/c4/gcpage.c --- 
a/c4/gcpage.c +++ b/c4/gcpage.c @@ -219,6 +219,7 @@ /* prebuilt original objects may have a predifined hash in h_original */ if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + assert(id_copy->h_tid & GCFLAG_PUBLIC); if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ @@ -608,8 +609,13 @@ if (!(obj->h_tid & GCFLAG_VISITED)) { /* forget 'obj' */ + dprintf(("private_from_protected: %p UNLISTED\n", obj)); items[i] = items[--d->private_from_protected.size]; } + else { + dprintf(("private_from_protected: %p\n", obj)); + assert(((gcptr)obj->h_revision)->h_tid & GCFLAG_VISITED); + } } assert(d->old_objects_to_trace.size == 0); diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -525,6 +525,6 @@ test_multi_thread(1624) def test_more_multi_thread(): - py.test.skip("more random tests") + #py.test.skip("more random tests") for i in range(200): - yield test_multi_thread, 1742/2 + i + yield test_multi_thread, 1100 + i From noreply at buildbot.pypy.org Wed Jul 24 17:38:06 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 24 Jul 2013 17:38:06 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: add ARM support for cond_call Message-ID: <20130724153806.E46F21C13EE@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: fast-slowpath Changeset: r65612:e79bad52356d Date: 2013-07-24 10:22 -0500 http://bitbucket.org/pypy/pypy/changeset/e79bad52356d/ Log: add ARM support for cond_call diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -259,6 +259,23 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart + def _build_cond_call_slowpath(self, supports_floats, callee_only): + """ This builds a general call slowpath, for whatever call happens to + come. + """ + mc = InstrBuilder(self.cpu.cpuinfo.arch_version) + # + self._push_all_regs_to_jitframe(mc, [], self.cpu.supports_floats, callee_only) + ## args are in their respective positions + mc.PUSH([r.ip.value, r.lr.value]) + mc.BLX(r.r4.value) + self._reload_frame_if_necessary(mc) + self._pop_all_regs_from_jitframe(mc, [], supports_floats, + callee_only) + # return + mc.POP([r.ip.value, r.pc.value]) + return mc.materialize(self.cpu.asmmemmgr, []) + def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. 
The arguments are passed in r0 and r10, as follows: diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -301,6 +301,32 @@ self._check_frame_depth_debug(self.mc) return fcond + def cond_call(self, op, gcmap, cond_loc, call_loc, fcond): + assert call_loc is r.r4 + self.mc.TST_rr(cond_loc.value, cond_loc.value) + jmp_adr = self.mc.currpos() + self.mc.BKPT() # patched later + # + self.push_gcmap(self.mc, gcmap, store=True) + # + callee_only = False + floats = False + if self._regalloc is not None: + for reg in self._regalloc.rm.reg_bindings.values(): + if reg not in self._regalloc.rm.save_around_call_regs: + break + else: + callee_only = True + if self._regalloc.vfprm.reg_bindings: + floats = True + cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] + self.mc.BL(cond_call_adr) + self.pop_gcmap(self.mc) + # never any result value + pmc = OverwritingBuilder(self.mc, jmp_adr, WORD) + pmc.B_offs(self.mc.currpos(), c.EQ) # equivalent to 0 as result of TST above + return fcond + def emit_op_jump(self, op, arglocs, regalloc, fcond): target_token = op.getdescr() assert isinstance(target_token, TargetToken) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -164,7 +164,11 @@ def get_scratch_reg(self, type=INT, forbidden_vars=[], selected_reg=None): assert type == INT or type == REF - box = TempBox() + box = None + if type == INT: + box = TempInt() + else: + box = TempPtr() self.temp_boxes.append(box) reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg) @@ -1126,6 +1130,24 @@ prepare_op_cond_call_gc_wb_array = prepare_op_cond_call_gc_wb + def prepare_op_cond_call(self, op, fcond): + assert op.result is None + assert 2 <= op.numargs() <= 4 + 2 + tmpreg = self.get_scratch_reg(INT, selected_reg=r.r4) + v = op.getarg(1) + assert isinstance(v, Const) + imm = self.rm.convert_to_imm(v) + self.assembler.regalloc_mov(imm, tmpreg) + args_so_far = [] + for i in range(2, op.numargs()): + reg = r.argument_regs[i - 2] + arg = op.getarg(i) + self.make_sure_var_in_reg(arg, args_so_far, selected_reg=reg) + args_so_far.append(arg) + loc_cond = self.make_sure_var_in_reg(op.getarg(0), args_so_far) + gcmap = self.get_gcmap([tmpreg]) + self.assembler.cond_call(op, gcmap, loc_cond, tmpreg, fcond) + def prepare_op_force_token(self, op, fcond): # XXX for now we return a regular reg res_loc = self.force_allocate_reg(op.result) From noreply at buildbot.pypy.org Wed Jul 24 17:51:25 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 24 Jul 2013 17:51:25 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Manually transplanted add-statvfs from default. Message-ID: <20130724155125.03D601C13EE@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: release-2.1.x Changeset: r65613:a52810f16fa8 Date: 2013-07-24 08:49 -0700 http://bitbucket.org/pypy/pypy/changeset/a52810f16fa8/ Log: Manually transplanted add-statvfs from default. 
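(Illustration, not part of the changeset: the transplanted code turns the C-level statvfs buffer into an app-level result via os.statvfs_result, which -- like os.stat_result -- can be constructed from a plain 10-tuple. A sketch of that behaviour on a host Python, with made-up numbers:)

    import os

    if hasattr(os, 'statvfs_result'):
        # field order: f_bsize, f_frsize, f_blocks, f_bfree, f_bavail,
        #              f_files, f_ffree, f_favail, f_flag, f_namemax
        st = os.statvfs_result((4096, 4096, 1000, 500, 400, 100, 50, 40, 0, 255))
        assert st.f_bsize == 4096
        assert st[4] == st.f_bavail == 400
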
diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -53,44 +54,49 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'fstatvfs': 'interp_posix.fstatvfs', + 'statvfs': 'interp_posix.statvfs', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 
'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -167,8 +173,8 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) @@ -177,7 +183,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -186,7 +192,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -194,7 +200,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def 
fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -154,6 +154,15 @@ else: return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def statvfs(path): + if isinstance(path, str): + return os.statvfs(path) + else: + return os.statvfs(path.as_bytes()) + + @specialize.argtype(0) def unlink(path): if isinstance(path, str): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,6 +1698,17 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) + @registering(os.fstatvfs) + def register_os_fstatvfs(self): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) + + # ------------------------------- os.W* --------------------------------- w_star = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -2,20 +2,22 @@ and os.fstat(). In RPython like in plain Python the stat result can be indexed like a tuple but also exposes the st_xxx attributes. 
""" -import os, sys + +import os +import sys + from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_with_new_name, func_renamer -from rpython.rtyper import extregistry -from rpython.rtyper.extfunc import register_external, extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize +from rpython.rtyper import extregistry +from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.extfunc import extdef +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.tool import rffi_platform as platform +from rpython.tool.pairtype import pairtype +from rpython.tool.sourcetools import func_renamer from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.annlowlevel import hlstr # Support for float times is here. # - ALL_STAT_FIELDS contains Float fields if the system can retrieve @@ -47,12 +49,26 @@ ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented - ] +] N_INDEXABLE_FIELDS = 10 # For OO backends, expose only the portable fields (the first 10). PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + # ____________________________________________________________ # # Annotation support @@ -79,6 +95,7 @@ def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7], st[8], st[9]) + def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) @@ -86,6 +103,23 @@ extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate + +class SomeStatvfsResult(annmodel.SomeObject): + knowntype = os.statvfs_result + + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.module import r_os_stat + return r_os_stat.StatvfsResultRepr(rtyper) + + def rtyper_makekey_ex(self, rtyper): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return annmodel.lltype_to_annotation(TYPE) + + class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" @@ -94,7 +128,17 @@ name, TYPE = STAT_FIELDS[index] return annmodel.lltype_to_annotation(TYPE) + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return annmodel.lltype_to_annotation(TYPE) + + s_StatResult = SomeStatResult() +s_StatvfsResult = SomeStatvfsResult() + def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" @@ -104,6 +148,11 @@ kwds[name] = tup[N_INDEXABLE_FIELDS + i] return 
os.stat_result(positional, kwds) + +def make_statvfs_result(tup): + return os.statvfs_result(tup) + + class MakeStatResultEntry(extregistry.ExtRegistryEntry): _about_ = make_stat_result @@ -114,22 +163,33 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.specialize_make_stat_result(hop) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + from rpython.rtyper.module import r_os_stat + return r_os_stat.specialize_make_statvfs_result(hop) + # ____________________________________________________________ # # RFFI support if sys.platform.startswith('win'): _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'unistd.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( # This must be set to 64 on some systems to enable large file support. #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], # ^^^ nowadays it's always set in all C files we produce. - includes = INCLUDES + includes=INCLUDES ) if TIMESPEC is not None: @@ -141,7 +201,7 @@ def posix_declaration(try_to_add=None): - global STAT_STRUCT + global STAT_STRUCT, STATVFS_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] if try_to_add: @@ -173,15 +233,17 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + try: - config = platform.configure(CConfig, ignore_errors= - try_to_add is not None) + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) except platform.CompilationError: if try_to_add: return # failed to add this field, give up raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) if try_to_add: STAT_FIELDS.append(try_to_add) @@ -202,6 +264,9 @@ STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] del _name, _TYPE +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + def build_stat_result(st): # only for LL backends @@ -233,6 +298,21 @@ return make_stat_result(result) +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + def register_stat_variant(name, traits): if name != 'fstat': arg_is_path = True @@ -301,6 +381,56 @@ [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) + +def register_statvfs_variant(name, traits): + if name != 'fstatvfs': + arg_is_path = True + s_arg = traits.str0 + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + posix_mystatvfs = rffi.llexternal(name, + [ARG1, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info + ) + + @func_renamer('os_%s_llimpl' % (name,)) + def posix_statvfs_llimpl(arg): + stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') + try: + if arg_is_path: + arg = traits.str2charp(arg) + error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) + if arg_is_path: + traits.free_charp(arg) + if error != 0: + raise OSError(rposix.get_errno(), "os_?statvfs failed") 
+ return build_statvfs_result(stresult) + finally: + lltype.free(stresult, flavor='raw') + + @func_renamer('os_%s_fake' % (name,)) + def posix_fakeimpl(arg): + if s_arg == traits.str0: + arg = hlstr(arg) + st = getattr(os, name)(arg) + fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] + TP = TUPLE_TYPE(fields) + ll_tup = lltype.malloc(TP.TO) + for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): + val = getattr(st, fieldname) + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) + return ll_tup + + return extdef( + [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl + ) + + def make_win32_stat_impl(name, traits): from rpython.rlib import rwin32 from rpython.rtyper.module.ll_win32file import make_win32_traits diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -67,3 +67,52 @@ # no-op conversion from r_StatResult.r_tuple to r_StatResult hop.exception_cannot_occur() return v_result + + +class StatvfsResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_fields = ll_os_stat.STATVFS_FIELDS + + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(self.statvfs_fields): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + for name, TYPE in self.statvfs_fields]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + hop2.forced_opname = 'getitem' + hop2.args_v = [hop2.args_v[0], Constant(index)] + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def specialize_make_statvfs_result(hop): + r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result From noreply at buildbot.pypy.org Wed Jul 24 17:51:26 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 17:51:26 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: at the very least don't break importing rpython on top of pypy Message-ID: <20130724155126.619C21C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.1.x Changeset: r65614:cb8fbb30ed01 Date: 2013-07-24 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/cb8fbb30ed01/ Log: at the very least don't break importing rpython on top of pypy diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,15 +1698,16 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) - @registering(os.fstatvfs) + @registering_if(os, 'fstatvfs') def register_os_fstatvfs(self): from rpython.rtyper.module import 
ll_os_stat return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) - @registering_str_unicode(os.statvfs) - def register_os_statvfs(self, traits): - from rpython.rtyper.module import ll_os_stat - return ll_os_stat.register_statvfs_variant('statvfs', traits) + if hasattr(os, 'statvfs'): + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) # ------------------------------- os.W* --------------------------------- From noreply at buildbot.pypy.org Wed Jul 24 17:51:27 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 17:51:27 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: fix translation, maybe Message-ID: <20130724155127.BC26B1C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: release-2.1.x Changeset: r65615:bdb563a30cf9 Date: 2013-07-24 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/bdb563a30cf9/ Log: fix translation, maybe diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -105,7 +105,10 @@ class SomeStatvfsResult(annmodel.SomeObject): - knowntype = os.statvfs_result + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used def rtyper_makerepr(self, rtyper): from rpython.rtyper.module import r_os_stat From noreply at buildbot.pypy.org Wed Jul 24 17:51:28 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 17:51:28 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Try to finally fix translation. Message-ID: <20130724155128.EFB9A1C13EE@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: release-2.1.x Changeset: r65616:740e6f1e4dbf Date: 2013-07-24 11:41 +0200 http://bitbucket.org/pypy/pypy/changeset/740e6f1e4dbf/ Log: Try to finally fix translation. 
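(Illustration, not part of the changeset: the fix below moves 'fstatvfs' and 'statvfs' into the loop that only exposes a name when the hosting os module actually provides it, so the module definition still imports on platforms without statvfs. The pattern, reduced to plain Python:)

    import os

    interpleveldefs = {}
    for name in ['fstatvfs', 'statvfs']:
        if hasattr(os, name):
            interpleveldefs[name] = 'interp_posix.%s' % (name,)
    # on Windows neither key is added, and importing keeps working
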
diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -69,9 +69,6 @@ 'lstat': 'interp_posix.lstat', 'stat_float_times': 'interp_posix.stat_float_times', - 'fstatvfs': 'interp_posix.fstatvfs', - 'statvfs': 'interp_posix.statvfs', - 'dup': 'interp_posix.dup', 'dup2': 'interp_posix.dup2', 'access': 'interp_posix.access', @@ -175,7 +172,7 @@ for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: From noreply at buildbot.pypy.org Wed Jul 24 17:52:04 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 24 Jul 2013 17:52:04 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: take the real address Message-ID: <20130724155204.F38B71C13EE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65617:c25cd18401d9 Date: 2013-07-24 17:44 +0200 http://bitbucket.org/pypy/pypy/changeset/c25cd18401d9/ Log: take the real address diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1188,7 +1188,6 @@ rst = gcrootmap.get_root_stack_top_addr() if gcrootmap.is_stm: rst = rst - stmtlocal.threadlocal_base() - assert rst > 0 assert rx86.fits_in_32bits(rst) return rst diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -96,7 +96,7 @@ def stm_get_root_stack_top(funcgen, op): result = funcgen.expr(op.result) - return '%s = (%s)stm_shadowstack;' % ( + return '%s = (%s)&stm_shadowstack;' % ( result, cdecl(funcgen.lltypename(op.result), '')) def stm_weakref_allocate(funcgen, op): From noreply at buildbot.pypy.org Wed Jul 24 18:00:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 24 Jul 2013 18:00:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add myself Message-ID: <20130724160058.0665A1C13EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5006:344a5ac3fac6 Date: 2013-07-24 18:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/344a5ac3fac6/ Log: Add myself diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -15,6 +15,7 @@ Romain Guillebert ? ? Laurence Tratt lives there Edd Barrett ? ? +Armin Rigo ? ? 
==================== ============== ======================= From noreply at buildbot.pypy.org Wed Jul 24 18:14:02 2013 From: noreply at buildbot.pypy.org (krono) Date: Wed, 24 Jul 2013 18:14:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Teach traceviewer a newer syntax of pypylogs Message-ID: <20130724161402.067621C0149@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r65618:6842ab2403ae Date: 2013-05-06 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/6842ab2403ae/ Log: Teach traceviewer a newer syntax of pypylogs diff --git a/rpython/jit/tool/test/f.pypylog.bz2 b/rpython/jit/tool/test/f.pypylog.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a982e459b1daa33547576733ccc0b560f99a3f79 GIT binary patch [cut] diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,7 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color + split_one_loop, postprocess, main, get_gradient_color, guard_number def test_gradient_color(): @@ -30,6 +30,20 @@ loops = splitloops(data) assert len(loops) == 2 + def test_no_of_loops_hexguards(self): + data = [preparse(""" + # Loop 0 : loop with 39 ops + debug_merge_point('', 0) + guard_class(p4, 141310752, descr=) [p0, p1] + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), preparse(""" + # Loop 1 : loop with 46 ops + p21 = getfield_gc(p4, descr=) + """)] + loops = splitloops(data) + assert len(loops) == 2 + def test_split_one_loop(self): real_loops = [FinalBlock(preparse(""" p21 = getfield_gc(p4, descr=) @@ -50,12 +64,42 @@ assert loop.left.content == '' assert loop.right.content == 'extra' + def test_split_one_loop_hexguards(self): + real_loops = [FinalBlock(preparse(""" + p21 = getfield_gc(p4, descr=) + guard_class(p4, 141310752, descr=) [p0, p1] + """), None), FinalBlock(preparse(""" + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), None)] + real_loops[0].loop_no = 0 + real_loops[1].loop_no = 1 + allloops = real_loops[:] + split_one_loop(real_loops, 'Guard0x10abcdef0', 'extra', 1, guard_number(("0x10abcdef0", "0x")), allloops) + loop = real_loops[1] + assert isinstance(loop, Block) + assert loop.content.endswith('p1]') + loop.left = allloops[loop.left] + loop.right = allloops[loop.right] + assert loop.left.content == '' + assert loop.right.content == 'extra' + def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP', 0)", None)] postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") + def test_postparse_new(self): + real_loops = [FinalBlock("debug_merge_point(0, 0, ' #351 LOAD_FAST')", None)] + postprocess(real_loops, real_loops[:], {}) + assert real_loops[0].header.startswith("_optimize_charset. file '/usr/local/Cellar/pypy/2.0-beta2/lib-python/2.7/sre_compile.py'. 
line 207") + def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') main(str(fname), False, view=False) # assert did not explode + + def test_load_actual_f(self): + fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') + main(str(fname), False, view=False) + # assert did not explode diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -56,6 +56,18 @@ BOX_COLOR = (128, 0, 96) +GUARDNO_RE = "((0x)?[\da-f]+)" +def guard_number(guardno_match): + if (len(guardno_match) == 1 # ("12354",) + or guardno_match[1] != "0x" # ("12345", None) + ): + return int(guardno_match[0]) + else: # ("0x12ef", "0x") + return int(guardno_match[0], 16) + +def guard_number_string(guardno_match): + return guardno_match[0] # its always the first group + class BasicBlock(object): counter = 0 startlineno = 0 @@ -85,13 +97,15 @@ def set_content(self, content): self._content = content - groups = re.findall('Guard(\d+)', content) + groups = re.findall('Guard' + GUARDNO_RE, content) if not groups: self.first_guard = -1 self.last_guard = -1 else: - self.first_guard = int(groups[0]) - self.last_guard = int(groups[-1]) + # guards can be out of order nowadays + groups = sorted(groups) + self.first_guard = guard_number(groups[0]) + self.last_guard = guard_number(groups[-1]) content = property(get_content, set_content) @@ -197,11 +211,11 @@ _loop.loop_no = no allloops.append(_loop) else: - m = re.search("bridge out of Guard (\d+)", firstline) + m = re.search("bridge out of Guard " + GUARDNO_RE, firstline) assert m - guard_s = 'Guard' + m.group(1) + guard_s = 'Guard' + guard_number_string(m.groups()) split_one_loop(real_loops, guard_s, loop, counter, - int(m.group(1)), allloops) + guard_number(m.groups()), allloops) counter += loop.count("\n") + 2 return real_loops, allloops @@ -211,7 +225,7 @@ memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\('( (.*?))'", loop.content) + m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) if m is None: name = '?' loop.key = '?' @@ -236,7 +250,7 @@ content = loop.content loop.content = "Logfile at %d\n" % loop.startlineno + content loop.postprocess(loops, memo, counts) - + def postprocess(loops, allloops, counts): for loop in allloops: if isinstance(loop, Block): From noreply at buildbot.pypy.org Wed Jul 24 18:14:03 2013 From: noreply at buildbot.pypy.org (krono) Date: Wed, 24 Jul 2013 18:14:03 +0200 (CEST) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20130724161403.ED4081C0149@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r65619:d63e416d3f3d Date: 2013-05-06 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/d63e416d3f3d/ Log: merge upstream diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -167,7 +167,6 @@ return if '_fields_' not in self.__dict__: self._fields_ = [] - self._names = [] _set_shape(self, [], self._is_union) __setattr__ = struct_setattr diff --git a/pypy/doc/release-2.0.0.rst b/pypy/doc/release-2.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.0.rst @@ -0,0 +1,61 @@ +============================ +PyPy 2.0 - Einstein Sandwich +============================ + +We're pleased to announce PyPy 2.0. This is a stable release that brings +a swath of bugfixes, small performance improvements and compatibility fixes. 
+ +You can download the PyPy 2.0 release here: + + http://pypy.org/download.html + +The two biggest changes since PyPy 1.9 are: + +* stackless is now supported including greenlets, which means eventlet + and gevent should work (but read below about gevent) + +* PyPy now contains release 0.6 of `cffi`_ as a builtin module, which + is preferred way of calling C from Python that works well on PyPy + +.. _`cffi`: http://cffi.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Windows 64 work is still stalling, we would welcome a volunteer +to handle that. ARM support is on the way and we're expecting to release +an alpha ARM version shortly. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +* Stackless including greenlets should work. For gevent, you need to check + out `pypycore`_ and use the `pypy-hacks`_ branch of gevent. + +* cffi is now a module included with PyPy. (`cffi`_ also exists for + CPython; the two versions should be fully compatible.) It is the + preferred way of calling C from Python that works on PyPy. + +* Callbacks from C are now JITted, which means XML parsing is much faster. + +* A lot of speed improvements in various language corners, most of them small, + but speeding up some particular corners a lot. + +* The JIT was refactored to emit machine code which manipulates a "frame" + that lives on the heap rather than on the stack. This is what makes + Stackless work, and it could bring another future speed-up (not done yet). + +* A lot of stability issues fixed. + +.. _`pypycore`: https://github.com/gevent-on-pypy/pypycore/ +.. _`pypy-hacks`: https://github.com/schmir/gevent/tree/pypy-hacks + +Cheers, +fijal, arigo and the PyPy team diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.0.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,140 +1,7 @@ ====================== -What's new in PyPy 2.0 +What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0-beta1 -.. startrev: 0e6161a009c6 +.. this is a revision shortly after release-2.0 +.. startrev: a13c07067613 -.. branch: split-rpython -Split rpython and pypy into seperate directories - -.. branch: callback-jit -Callbacks from C are now better JITted - -.. branch: fix-jit-logs - -.. branch: remove-globals-in-jit - -.. branch: length-hint -Implement __lenght_hint__ according to PEP 424 - -.. branch: numpypy-longdouble -Long double support for numpypy - -.. branch: numpypy-disable-longdouble -Since r_longdouble support is missing, disable all longdouble and derivative -dtypes using ENABLED_LONG_DOUBLE = False - -.. branch: numpypy-real-as-view -Convert real, imag from ufuncs to views. This involves the beginning of -view() functionality - -.. branch: indexing-by-array -Adds indexing by scalar, adds int conversion from scalar and single element array, -fixes compress, indexing by an array with a smaller shape and the indexed object. - -.. branch: str-dtype-improvement -Allow concatenation of str and numeric arrays - -.. branch: signatures -Improved RPython typing - -.. 
branch: rpython-bytearray -Rudimentary support for bytearray in RPython - -.. branch: refactor-call_release_gil -Fix a bug which caused cffi to return the wrong result when calling a C -function which calls a Python callback which forces the frames - -.. branch: virtual-raw-mallocs -JIT optimizations which make cffi calls even faster, by removing the need to -allocate a temporary buffer where to store the arguments. - -.. branch: improve-docs-2 -Improve documents and straighten out links - -.. branch: fast-newarray -Inline the fast path of newarray in the assembler. -Disabled on ARM until we fix issues. - -.. branch: reflex-support -Allow dynamic loading of a (Reflex) backend that implements the C-API needed -to provide reflection information - -.. branches we don't care about -.. branch: autoreds -.. branch: kill-faking -.. branch: improved_ebnfparse_error -.. branch: task-decorator -.. branch: fix-e4fa0b2 -.. branch: win32-fixes -.. branch: numpy-unify-methods -.. branch: fix-version-tool -.. branch: popen2-removal -.. branch: pickle-dumps -.. branch: scalar_get_set - -.. branch: release-2.0-beta1 - -.. branch: remove-PYPY_NOT_MAIN_FILE - -.. branch: missing-jit-operations - -.. branch: fix-lookinside-iff-oopspec -Fixed the interaction between two internal tools for controlling the JIT. - -.. branch: inline-virtualref-2 -Better optimized certain types of frame accesses in the JIT, particularly -around exceptions that escape the function they were raised in. - -.. branch: missing-ndarray-attributes -Some missing attributes from ndarrays - -.. branch: cleanup-tests -Consolidated the lib_pypy/pypy_test and pypy/module/test_lib_pypy tests into -one directory for reduced confusion and so they all run nightly. - -.. branch: unquote-faster -.. branch: urlparse-unquote-faster - -.. branch: signal-and-thread -Add "__pypy__.thread.signals_enabled", a context manager. Can be used in a -non-main thread to enable the processing of signal handlers in that thread. - -.. branch: coding-guide-update-rlib-refs -.. branch: rlib-doc-rpython-refs -.. branch: clean-up-remaining-pypy-rlib-refs - -.. branch: enumerate-rstr -Support enumerate() over rstr types. - -.. branch: cleanup-numpypy-namespace -Cleanup _numpypy and numpypy namespaces to more closely resemble numpy. - -.. branch: kill-flowobjspace -Random cleanups to hide FlowObjSpace from public view. - -.. branch: vendor-rename - -.. branch: jitframe-on-heap -Moves optimized JIT frames from stack to heap. As a side effect it enables -stackless to work well with the JIT on PyPy. Also removes a bunch of code from -the GC which fixes cannot find gc roots. - -.. branch: pycon2013-doc-fixes -Documentation fixes after going through the docs at PyCon 2013 sprint. - -.. branch: extregistry-refactor - -.. branch: remove-list-smm -.. branch: bridge-logging -.. branch: curses_cffi -cffi implementation of _curses - -.. branch: sqlite-cffi -cffi implementation of sqlite3 - -.. branch: release-2.0-beta2 -.. branch: unbreak-freebsd - -.. 
branch: virtualref-virtualizable diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -6,8 +6,14 @@ if sys.platform.startswith('linux'): arch = 'linux' + cmd = 'wget "%s"' + tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" +if sys.platform.startswith('darwin'): + arch = 'osx' + cmd = 'curl -O "%s"' + tar = "tar -x -v --strip-components=2 -f %s '*/bin/pypy'" else: - print 'Cannot determine the platform, please update this scrip' + print 'Cannot determine the platform, please update this script' sys.exit(1) if sys.maxint == 2**63 - 1: @@ -23,10 +29,9 @@ tmp = py.path.local.mkdtemp() mydir = tmp.chdir() print 'Downloading pypy to', tmp -if os.system('wget "%s"' % url) != 0: +if os.system(cmd % url) != 0: sys.exit(1) print 'Extracting pypy binary' mydir.chdir() -os.system("tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" % tmp.join(filename)) - +os.system(tar % tmp.join(filename)) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -85,3 +85,7 @@ from rpython.jit.backend import detect_cpu model = detect_cpu.autodetect_main_model_and_size() self.extra_interpdef('cpumodel', 'space.wrap(%r)' % model) + if self.space.config.translation.jit: + features = detect_cpu.getcpufeatures(model) + self.extra_interpdef('jit_backend_features', + 'space.wrap(%r)' % features) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -62,3 +62,14 @@ assert list_strategy(l) == "empty" o = 5 raises(TypeError, list_strategy, 5) + + +class AppTestJitFeatures(object): + spaceconfig = {"translation.jit": True} + + def test_jit_backend_features(self): + from __pypy__ import jit_backend_features + supported_types = jit_backend_features + assert isinstance(supported_types, list) + for x in supported_types: + assert x in ['floats', 'singlefloats', 'longlong'] diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -38,5 +38,5 @@ def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. 
- from pypy.module._io.interp_iobase import get_autoflushher - get_autoflushher(space).flush_all(space) + from pypy.module._io.interp_iobase import get_autoflusher + get_autoflusher(space).flush_all(space) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -47,7 +47,7 @@ self.w_dict = space.newdict() self.__IOBase_closed = False self.streamholder = None # needed by AutoFlusher - get_autoflushher(space).add(self) + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict @@ -103,7 +103,7 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflushher(space).remove(self) + get_autoflusher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -363,5 +363,5 @@ else: streamholder.autoflush(space) -def get_autoflushher(space): +def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py --- a/pypy/module/_random/interp_random.py +++ b/pypy/module/_random/interp_random.py @@ -33,8 +33,8 @@ elif space.isinstance_w(w_n, space.w_long): w_n = space.abs(w_n) else: - # XXX not perfectly like CPython - w_n = space.abs(space.hash(w_n)) + n = space.hash_w(w_n) + w_n = space.wrap(r_uint(n)) key = [] w_one = space.newint(1) w_two = space.newint(2) diff --git a/pypy/module/_random/test/test_random.py b/pypy/module/_random/test/test_random.py --- a/pypy/module/_random/test/test_random.py +++ b/pypy/module/_random/test/test_random.py @@ -42,13 +42,14 @@ rnd1.setstate((-1, ) * 624 + (0, )) def test_seed(self): - import _random + import _random, sys rnd = _random.Random() rnd.seed() different_nums = [] + mask = sys.maxint * 2 + 1 for obj in ["spam and eggs", 3.14, 1+2j, 'a', tuple('abc')]: nums = [] - for o in [obj, hash(obj), -hash(obj)]: + for o in [obj, hash(obj) & mask, -(hash(obj) & mask)]: rnd.seed(o) nums.append([rnd.random() for i in range(100)]) n1 = nums[0] diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -16,6 +16,8 @@ from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.module.sys.version import PYPY_VERSION +_WIN32 = sys.platform == 'win32' + SEARCH_ERROR = 0 PY_SOURCE = 1 PY_COMPILED = 2 @@ -27,12 +29,8 @@ # PY_CODERESOURCE = 8 IMP_HOOK = 9 -if sys.platform == 'win32': - SO = ".pyd" -else: - SO = ".so" +SO = '.pyd' if _WIN32 else '.so' DEFAULT_SOABI = 'pypy-%d%d' % PYPY_VERSION[:2] -CHECK_FOR_PYW = sys.platform == 'win32' @specialize.memo() def get_so_extension(space): @@ -64,7 +62,7 @@ return PY_SOURCE, ".py", "U" # on Windows, also check for a .pyw file - if CHECK_FOR_PYW: + if _WIN32: pyfile = filepart + ".pyw" if file_exists(pyfile): return PY_SOURCE, ".pyw", "U" diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -119,6 +119,12 @@ """) def test_array_of_floats(self): + try: + from __pypy__ import jit_backend_features + if 'singlefloats' not in jit_backend_features: + py.test.skip("test requres singlefloats support from the JIT backend") + except ImportError: + pass def main(): from array import array img = array('f', [21.5]*1000) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py 
b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_structures.py @@ -230,6 +230,17 @@ pt = POINT(y=2, x=1) assert (pt.x, pt.y) == (1, 2) + def test_subclass_initializer(self): + class POINT(Structure): + _fields_ = [("x", c_int), ("y", c_int)] + + class POSITION(POINT): + # A subclass without _fields_ + pass + pos = POSITION(1, 2) + assert (pos.x, pos.y) == (1, 2) + + def test_invalid_field_types(self): class POINT(Structure): pass @@ -538,6 +549,7 @@ raises(AttributeError, setattr, X, "_fields_", []) Y.__fields__ = [] + class TestPatologicalCases(BaseCTypesTestChecker): def test_structure_overloading_getattr(self): class X(Structure): diff --git a/pypy/objspace/std/stringobject.py b/pypy/objspace/std/stringobject.py --- a/pypy/objspace/std/stringobject.py +++ b/pypy/objspace/std/stringobject.py @@ -39,12 +39,10 @@ def unicode_w(w_self, space): # Use the default encoding. - from pypy.objspace.std.unicodetype import unicode_from_string, \ - decode_object + from pypy.objspace.std.unicodetype import (unicode_from_string, + decode_object, _get_encoding_and_errors) w_defaultencoding = space.call_function(space.sys.get( 'getdefaultencoding')) - from pypy.objspace.std.unicodetype import _get_encoding_and_errors, \ - unicode_from_string, decode_object encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, space.w_None) if encoding is None and errors is None: @@ -236,7 +234,7 @@ def str_title__String(space, w_self): input = w_self._value builder = StringBuilder(len(input)) - prev_letter=' ' + prev_letter = ' ' for pos in range(len(input)): ch = input[pos] @@ -434,7 +432,7 @@ space.wrap("rjust() argument 2 must be a single character")) d = u_arg - len(u_self) - if d>0: + if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character u_self = d * fillchar + u_self @@ -450,7 +448,7 @@ space.wrap("ljust() argument 2 must be a single character")) d = u_arg - len(u_self) - if d>0: + if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character u_self += d * fillchar @@ -471,12 +469,12 @@ return space.newbool(self.find(sub) >= 0) def str_find__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end): - (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) + (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) res = self.find(w_sub._value, start, end) return space.wrap(res) def str_rfind__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end): - (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) + (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) res = self.rfind(w_sub._value, start, end) return space.wrap(res) @@ -511,7 +509,7 @@ def str_index__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end): - (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) + (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) res = self.find(w_sub._value, start, end) if res < 0: raise OperationError(space.w_ValueError, @@ -521,7 +519,7 @@ def str_rindex__String_String_ANY_ANY(space, w_self, w_sub, w_start, w_end): - (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) + (self, start, end) = _convert_idx_params(space, w_self, w_start, w_end) res = self.rfind(w_sub._value, start, end) if res < 0: raise OperationError(space.w_ValueError, @@ -728,7 +726,7 @@ while 1: #no sophisticated linebreak support now, '\r' just for 
passing adapted CPython test if u_token[offset-1] == "\n" or u_token[offset-1] == "\r": - break; + break distance += 1 offset -= 1 if offset == 0: @@ -738,7 +736,7 @@ #print '' % (offset, distance, u_tabsize, u_token) distance = (u_tabsize-distance) % u_tabsize if distance == 0: - distance=u_tabsize + distance = u_tabsize return distance @@ -760,14 +758,14 @@ for token in split: #print "%d#%d -%s-" % (_tabindent(oldtoken,u_tabsize), u_tabsize, token) - u_expanded += " " * _tabindent(oldtoken,u_tabsize) + token + u_expanded += " " * _tabindent(oldtoken, u_tabsize) + token oldtoken = token return wrapstr(space, u_expanded) def str_splitlines__String_ANY(space, w_self, w_keepends): - u_keepends = space.int_w(w_keepends) # truth value, but type checked + u_keepends = space.int_w(w_keepends) # truth value, but type checked data = w_self._value selflen = len(data) strs_w = [] @@ -876,7 +874,6 @@ return wrapchar(space, str[ival]) def getitem__String_Slice(space, w_str, w_slice): - w = space.wrap s = w_str._value length = len(s) start, stop, step, sl = w_slice.indices4(space, length) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -948,6 +948,9 @@ def test_setitem_slice_performance(self): # because of a complexity bug, this used to take forever on a # translated pypy. On CPython2.6 -A, it takes around 5 seconds. + import platform + if platform.machine().startswith('arm'): + skip("consumes too much memory for most ARM machines") if self.runappdirect: count = 16*1024*1024 else: diff --git a/pypy/pytest-A.py b/pypy/pytest-A.py --- a/pypy/pytest-A.py +++ b/pypy/pytest-A.py @@ -6,6 +6,7 @@ 'interpreter/pyparser/test', 'interpreter/test', 'interpreter/test2', + 'module/test_lib_pypy', 'objspace/std/test', ], } diff --git a/rpython/jit/backend/arm/test/test_fficall.py b/rpython/jit/backend/arm/test/test_fficall.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/arm/test/test_fficall.py @@ -0,0 +1,23 @@ +import py +from rpython.jit.metainterp.test import test_fficall +from rpython.jit.backend.arm.test.support import JitARMMixin + +class TestFfiCall(JitARMMixin, test_fficall.FfiCallTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_fficall.py + + def _add_libffi_types_to_ll2types_maybe(self): + # this is needed by test_guard_not_forced_fails, because it produces a + # loop which reads the value of types.* in a variable, then a guard + # fail and we switch to blackhole: the problem is that at this point + # the blackhole interp has a real integer, but it needs to convert it + # back to a lltype pointer (which is handled by ll2ctypes, deeply in + # the logic). The workaround is to teach ll2ctypes in advance which + # are the addresses of the various types.* structures. 
+ # Try to comment this code out and run the test to see how it fails :) + from rpython.rtyper.lltypesystem import rffi, lltype, ll2ctypes + from rpython.rlib.jit_libffi import types + for key, value in types.__dict__.iteritems(): + if isinstance(value, lltype._ptr): + addr = rffi.cast(lltype.Signed, value) + ll2ctypes._int2obj[addr] = value diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -115,6 +115,14 @@ mod = __import__(modname, {}, {}, clsname) return getattr(mod, clsname) + +def getcpufeatures(backend_name="auto"): + """NOT_RPYTHON""" + cpucls = getcpuclass(backend_name) + return [attr[len('supports_'):] for attr in dir(cpucls) + if attr.startswith('supports_') + and getattr(cpucls, attr)] + if __name__ == '__main__': print autodetect() print getcpuclassname() diff --git a/rpython/jit/backend/test/test_detect_cpu.py b/rpython/jit/backend/test/test_detect_cpu.py --- a/rpython/jit/backend/test/test_detect_cpu.py +++ b/rpython/jit/backend/test/test_detect_cpu.py @@ -31,3 +31,9 @@ def test_detect_main_model_and_size_from_platform(): info = autodetect_main_model_and_size() assert detect_main_model_and_size_from_platform() == info + +def test_getcpufeatures(): + features = getcpufeatures() + assert isinstance(features, list) + for x in features: + assert x in ['floats', 'singlefloats', 'longlong'] diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1751,7 +1751,7 @@ def rewrite_op_jit_ffi_save_result(self, op): kind = op.args[0].value - assert kind in ('int', 'float') + assert kind in ('int', 'float', 'longlong', 'singlefloat') return SpaceOperation('libffi_save_result_%s' % kind, op.args[1:], None) def rewrite_op_jit_force_virtual(self, op): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1351,24 +1351,39 @@ def bhimpl_ll_read_timestamp(): return read_timestamp() - @arguments("cpu", "i", "i", "i") - def bhimpl_libffi_save_result_int(self, cif_description, exchange_buffer, result): - ARRAY = lltype.Ptr(rffi.CArray(lltype.Signed)) - cif_description = self.cast_int_to_ptr(cif_description, CIF_DESCRIPTION_P) - exchange_buffer = self.cast_int_to_ptr(exchange_buffer, rffi.CCHARP) + def _libffi_save_result(self, cif_description, exchange_buffer, result): + ARRAY = lltype.Ptr(rffi.CArray(lltype.typeOf(result))) + cast_int_to_ptr = self.cpu.cast_int_to_ptr + cif_description = cast_int_to_ptr(cif_description, CIF_DESCRIPTION_P) + exchange_buffer = cast_int_to_ptr(exchange_buffer, rffi.CCHARP) # data_out = rffi.ptradd(exchange_buffer, cif_description.exchange_result) rffi.cast(ARRAY, data_out)[0] = result + _libffi_save_result._annspecialcase_ = 'specialize:argtype(3)' - @arguments("cpu", "i", "i", "f") - def bhimpl_libffi_save_result_float(self, cif_description, exchange_buffer, result): + @arguments("self", "i", "i", "i") + def bhimpl_libffi_save_result_int(self, cif_description, + exchange_buffer, result): + self._libffi_save_result(cif_description, exchange_buffer, result) + + @arguments("self", "i", "i", "f") + def bhimpl_libffi_save_result_float(self, cif_description, + exchange_buffer, result): result = longlong.getrealfloat(result) - ARRAY = lltype.Ptr(rffi.CArray(lltype.Float)) - cif_description = 
self.cast_int_to_ptr(cif_description, CIF_DESCRIPTION_P) - exchange_buffer = self.cast_int_to_ptr(exchange_buffer, rffi.CCHARP) - # - data_out = rffi.ptradd(exchange_buffer, cif_description.exchange_result) - rffi.cast(ARRAY, data_out)[0] = result + self._libffi_save_result(cif_description, exchange_buffer, result) + + @arguments("self", "i", "i", "f") + def bhimpl_libffi_save_result_longlong(self, cif_description, + exchange_buffer, result): + # 32-bit only: 'result' is here a LongLong + assert longlong.is_longlong(lltype.typeOf(result)) + self._libffi_save_result(cif_description, exchange_buffer, result) + + @arguments("self", "i", "i", "i") + def bhimpl_libffi_save_result_singlefloat(self, cif_description, + exchange_buffer, result): + result = longlong.int2singlefloat(result) + self._libffi_save_result(cif_description, exchange_buffer, result) # ---------- diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1190,8 +1190,8 @@ return self.metainterp.execute_and_record(rop.READ_TIMESTAMP, None) @arguments("box", "box", "box") - def opimpl_libffi_save_result_int(self, box_cif_description, box_exchange_buffer, - box_result): + def _opimpl_libffi_save_result(self, box_cif_description, + box_exchange_buffer, box_result): from rpython.rtyper.lltypesystem import llmemory from rpython.rlib.jit_libffi import CIF_DESCRIPTION_P from rpython.jit.backend.llsupport.ffisupport import get_arg_descr @@ -1208,10 +1208,14 @@ assert ofs % itemsize == 0 # alignment check (result) self.metainterp.history.record(rop.SETARRAYITEM_RAW, [box_exchange_buffer, - ConstInt(ofs // itemsize), box_result], + ConstInt(ofs // itemsize), + box_result], None, descr) - opimpl_libffi_save_result_float = opimpl_libffi_save_result_int + opimpl_libffi_save_result_int = _opimpl_libffi_save_result + opimpl_libffi_save_result_float = _opimpl_libffi_save_result + opimpl_libffi_save_result_longlong = _opimpl_libffi_save_result + opimpl_libffi_save_result_singlefloat = _opimpl_libffi_save_result # ------------------------------ diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -14,7 +14,10 @@ def _get_jitcodes(testself, CPUClass, func, values, type_system, - supports_longlong=False, translationoptions={}, **kwds): + supports_floats=True, + supports_longlong=False, + supports_singlefloats=False, + translationoptions={}, **kwds): from rpython.jit.codewriter import support class FakeJitCell(object): @@ -67,9 +70,16 @@ cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) cw.debug = True testself.cw = cw + if supports_floats and not cpu.supports_floats: + py.test.skip("this test requires supports_floats=True") + if supports_longlong and not cpu.supports_longlong: + py.test.skip("this test requires supports_longlong=True") + if supports_singlefloats and not cpu.supports_singlefloats: + py.test.skip("this test requires supports_singlefloats=True") policy = JitPolicy() - policy.set_supports_floats(True) + policy.set_supports_floats(supports_floats) policy.set_supports_longlong(supports_longlong) + policy.set_supports_singlefloats(supports_singlefloats) graphs = cw.find_all_graphs(policy) if kwds.get("backendopt"): backend_optimizations(rtyper.annotator.translator, graphs=graphs) diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- 
a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -5,13 +5,13 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.annlowlevel import llhelper from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.codewriter.longlong import is_longlong +from rpython.jit.codewriter.longlong import is_longlong, is_64_bit from rpython.rlib import jit from rpython.rlib import jit_libffi from rpython.rlib.jit_libffi import (types, CIF_DESCRIPTION, FFI_TYPE_PP, jit_ffi_call, jit_ffi_save_result) from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.rarithmetic import intmask, r_longlong +from rpython.rlib.rarithmetic import intmask, r_longlong, r_singlefloat from rpython.rlib.longlong2float import float2longlong def get_description(atypes, rtype): @@ -45,7 +45,12 @@ class FfiCallTests(object): - def _run(self, atypes, rtype, avalues, rvalue, expected_call_release_gil=1): + def _run(self, atypes, rtype, avalues, rvalue, + expected_call_release_gil=1, + supports_floats=True, + supports_longlong=True, + supports_singlefloats=True): + cif_description = get_description(atypes, rtype) def verify(*args): @@ -67,7 +72,11 @@ for avalue in unroll_avalues: TYPE = rffi.CArray(lltype.typeOf(avalue)) data = rffi.ptradd(exchange_buffer, ofs) - assert rffi.cast(lltype.Ptr(TYPE), data)[0] == avalue + got = rffi.cast(lltype.Ptr(TYPE), data)[0] + if lltype.typeOf(avalue) is lltype.SingleFloat: + got = float(got) + avalue = float(avalue) + assert got == avalue ofs += 16 if rvalue is not None: write_rvalue = rvalue @@ -96,17 +105,30 @@ data = rffi.ptradd(exbuf, ofs) res = rffi.cast(lltype.Ptr(TYPE), data)[0] lltype.free(exbuf, flavor='raw') + if lltype.typeOf(res) is lltype.SingleFloat: + res = float(res) return res + def matching_result(res, rvalue): + if rvalue is None: + return res == 654321 + if isinstance(rvalue, r_singlefloat): + rvalue = float(rvalue) + return res == rvalue + with FakeFFI(fake_call_impl_any): res = f() - assert res == rvalue or (res, rvalue) == (654321, None) - res = self.interp_operations(f, []) + assert matching_result(res, rvalue) + res = self.interp_operations(f, [], + supports_floats = supports_floats, + supports_longlong = supports_longlong, + supports_singlefloats = supports_singlefloats) if is_longlong(FUNC.RESULT): - # longlongs are passed around as floats inside the JIT, we - # need to convert it back before checking the value + # longlongs are returned as floats, but that's just + # an inconvenience of interp_operations(). Normally both + # longlong and floats are passed around as longlongs. 
res = float2longlong(res) - assert res == rvalue or (res, rvalue) == (654321, None) + assert matching_result(res, rvalue) self.check_operations_history(call_may_force=0, call_release_gil=expected_call_release_gil) @@ -119,14 +141,24 @@ [-123456*j for j in range(i)], -42434445) - def test_simple_call_float(self): - self._run([types.double] * 2, types.double, [45.6, 78.9], -4.2) + def test_simple_call_float(self, **kwds): + self._run([types.double] * 2, types.double, [45.6, 78.9], -4.2, **kwds) - def test_simple_call_longlong(self): + def test_simple_call_longlong(self, **kwds): maxint32 = 2147483647 a = r_longlong(maxint32) + 1 b = r_longlong(maxint32) + 2 - self._run([types.slonglong] * 2, types.slonglong, [a, b], a) + self._run([types.slonglong] * 2, types.slonglong, [a, b], a, **kwds) + + def test_simple_call_singlefloat_args(self): + self._run([types.float] * 2, types.double, + [r_singlefloat(10.5), r_singlefloat(31.5)], + -4.5) + + def test_simple_call_singlefloat(self, **kwds): + self._run([types.float] * 2, types.float, + [r_singlefloat(10.5), r_singlefloat(31.5)], + r_singlefloat(-4.5), **kwds) def test_simple_call_longdouble(self): # longdouble is not supported, so we expect NOT to generate a call_release_gil @@ -266,3 +298,20 @@ assert res == math.sin(1.23) lltype.free(atypes, flavor='raw') + + def test_simple_call_float_unsupported(self): + self.test_simple_call_float(supports_floats=False, + expected_call_release_gil=0) + + def test_simple_call_longlong_unsupported(self): + self.test_simple_call_longlong(supports_longlong=False, + expected_call_release_gil=is_64_bit) + + def test_simple_call_singlefloat_unsupported(self): + self.test_simple_call_singlefloat(supports_singlefloats=False, + expected_call_release_gil=0) + + def test_simple_call_float_even_if_other_unsupported(self): + self.test_simple_call_float(supports_longlong=False, + supports_singlefloats=False) + # this is the default: expected_call_release_gil=1 diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -281,11 +281,11 @@ def finish_helpers(self, backendopt=True): if self.translator is not None: self.mixlevelannotator.finish_annotate() - self.finished_helpers = True if self.translator is not None: self.mixlevelannotator.finish_rtype() if backendopt: self.mixlevelannotator.backend_optimize() + self.finished_helpers = True # Make sure that the database also sees all finalizers now. 
# It is likely that the finalizers need special support there newgcdependencies = self.ll_finalizers_ptrs diff --git a/rpython/rlib/jit_libffi.py b/rpython/rlib/jit_libffi.py --- a/rpython/rlib/jit_libffi.py +++ b/rpython/rlib/jit_libffi.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib import clibffi, jit +from rpython.rlib.rarithmetic import r_longlong, r_singlefloat from rpython.rlib.nonconst import NonConstant @@ -107,12 +108,14 @@ reskind = types.getkind(cif_description.rtype) if reskind == 'v': jit_ffi_call_impl_void(cif_description, func_addr, exchange_buffer) - elif reskind == 'f' or reskind == 'L': # L is for longlongs, on 32bit - result = jit_ffi_call_impl_float(cif_description, func_addr, exchange_buffer) - jit_ffi_save_result('float', cif_description, exchange_buffer, result) elif reskind == 'i' or reskind == 'u': - result = jit_ffi_call_impl_int(cif_description, func_addr, exchange_buffer) - jit_ffi_save_result('int', cif_description, exchange_buffer, result) + _do_ffi_call_int(cif_description, func_addr, exchange_buffer) + elif reskind == 'f': + _do_ffi_call_float(cif_description, func_addr, exchange_buffer) + elif reskind == 'L': # L is for longlongs, on 32bit + _do_ffi_call_longlong(cif_description, func_addr, exchange_buffer) + elif reskind == 'S': # SingleFloat + _do_ffi_call_singlefloat(cif_description, func_addr, exchange_buffer) else: # the result kind is not supported: we disable the jit_ffi_call # optimization by calling directly jit_ffi_call_impl_any, so the JIT @@ -123,6 +126,30 @@ jit_ffi_call_impl_any(cif_description, func_addr, exchange_buffer) +def _do_ffi_call_int(cif_description, func_addr, exchange_buffer): + result = jit_ffi_call_impl_int(cif_description, func_addr, + exchange_buffer) + jit_ffi_save_result('int', cif_description, exchange_buffer, result) + +def _do_ffi_call_float(cif_description, func_addr, exchange_buffer): + # a separate function in case the backend doesn't support floats + result = jit_ffi_call_impl_float(cif_description, func_addr, + exchange_buffer) + jit_ffi_save_result('float', cif_description, exchange_buffer, result) + +def _do_ffi_call_longlong(cif_description, func_addr, exchange_buffer): + # a separate function in case the backend doesn't support longlongs + result = jit_ffi_call_impl_longlong(cif_description, func_addr, + exchange_buffer) + jit_ffi_save_result('longlong', cif_description, exchange_buffer, result) + +def _do_ffi_call_singlefloat(cif_description, func_addr, exchange_buffer): + # a separate function in case the backend doesn't support singlefloats + result = jit_ffi_call_impl_singlefloat(cif_description, func_addr, + exchange_buffer) + jit_ffi_save_result('singlefloat', cif_description, exchange_buffer,result) + + # we must return a NonConstant else we get the constant -1 as the result of # the flowgraph, and the codewriter does not produce a box for the # result. 
Note that when not-jitted, the result is unused, but when jitted the @@ -139,6 +166,16 @@ return NonConstant(-1.0) @jit.oopspec("libffi_call(cif_description,func_addr,exchange_buffer)") +def jit_ffi_call_impl_longlong(cif_description, func_addr, exchange_buffer): + jit_ffi_call_impl_any(cif_description, func_addr, exchange_buffer) + return r_longlong(-1) + + at jit.oopspec("libffi_call(cif_description,func_addr,exchange_buffer)") +def jit_ffi_call_impl_singlefloat(cif_description, func_addr, exchange_buffer): + jit_ffi_call_impl_any(cif_description, func_addr, exchange_buffer) + return r_singlefloat(-1.0) + + at jit.oopspec("libffi_call(cif_description,func_addr,exchange_buffer)") def jit_ffi_call_impl_void(cif_description, func_addr, exchange_buffer): jit_ffi_call_impl_any(cif_description, func_addr, exchange_buffer) return None @@ -175,7 +212,7 @@ def compute_result_annotation(self, kind_s, *args_s): from rpython.annotator import model as annmodel assert isinstance(kind_s, annmodel.SomeString) - assert kind_s.const in ('int', 'float') + assert kind_s.const in ('int', 'float', 'longlong', 'singlefloat') def specialize_call(self, hop): hop.exception_cannot_occur() diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -329,7 +329,7 @@ self.collect_one_testdir(testdirs, reldir, [self.reltoroot(t) for t in entries if self.is_test_py_file(t)]) - return + break for p1 in entries: if p1.check(dir=1, link=0): From noreply at buildbot.pypy.org Wed Jul 24 18:14:05 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 24 Jul 2013 18:14:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in krono/pypy (pull request #150) Message-ID: <20130724161405.693301C0149@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65620:18e46eddb306 Date: 2013-07-24 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/18e46eddb306/ Log: Merged in krono/pypy (pull request #150) Adapt Traceviewer to changed trace-logs diff --git a/rpython/jit/tool/test/f.pypylog.bz2 b/rpython/jit/tool/test/f.pypylog.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a982e459b1daa33547576733ccc0b560f99a3f79 GIT binary patch [cut] diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,7 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color + split_one_loop, postprocess, main, get_gradient_color, guard_number def test_gradient_color(): @@ -30,6 +30,20 @@ loops = splitloops(data) assert len(loops) == 2 + def test_no_of_loops_hexguards(self): + data = [preparse(""" + # Loop 0 : loop with 39 ops + debug_merge_point('', 0) + guard_class(p4, 141310752, descr=) [p0, p1] + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), preparse(""" + # Loop 1 : loop with 46 ops + p21 = getfield_gc(p4, descr=) + """)] + loops = splitloops(data) + assert len(loops) == 2 + def test_split_one_loop(self): real_loops = [FinalBlock(preparse(""" p21 = getfield_gc(p4, descr=) @@ -50,12 +64,42 @@ assert loop.left.content == '' assert loop.right.content == 'extra' + def test_split_one_loop_hexguards(self): + real_loops = [FinalBlock(preparse(""" + p21 = getfield_gc(p4, descr=) + guard_class(p4, 141310752, descr=) [p0, p1] + """), None), FinalBlock(preparse(""" + p60 = 
getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), None)] + real_loops[0].loop_no = 0 + real_loops[1].loop_no = 1 + allloops = real_loops[:] + split_one_loop(real_loops, 'Guard0x10abcdef0', 'extra', 1, guard_number(("0x10abcdef0", "0x")), allloops) + loop = real_loops[1] + assert isinstance(loop, Block) + assert loop.content.endswith('p1]') + loop.left = allloops[loop.left] + loop.right = allloops[loop.right] + assert loop.left.content == '' + assert loop.right.content == 'extra' + def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP', 0)", None)] postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") + def test_postparse_new(self): + real_loops = [FinalBlock("debug_merge_point(0, 0, ' #351 LOAD_FAST')", None)] + postprocess(real_loops, real_loops[:], {}) + assert real_loops[0].header.startswith("_optimize_charset. file '/usr/local/Cellar/pypy/2.0-beta2/lib-python/2.7/sre_compile.py'. line 207") + def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') main(str(fname), False, view=False) # assert did not explode + + def test_load_actual_f(self): + fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') + main(str(fname), False, view=False) + # assert did not explode diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -56,6 +56,18 @@ BOX_COLOR = (128, 0, 96) +GUARDNO_RE = "((0x)?[\da-f]+)" +def guard_number(guardno_match): + if (len(guardno_match) == 1 # ("12354",) + or guardno_match[1] != "0x" # ("12345", None) + ): + return int(guardno_match[0]) + else: # ("0x12ef", "0x") + return int(guardno_match[0], 16) + +def guard_number_string(guardno_match): + return guardno_match[0] # its always the first group + class BasicBlock(object): counter = 0 startlineno = 0 @@ -85,13 +97,15 @@ def set_content(self, content): self._content = content - groups = re.findall('Guard(\d+)', content) + groups = re.findall('Guard' + GUARDNO_RE, content) if not groups: self.first_guard = -1 self.last_guard = -1 else: - self.first_guard = int(groups[0]) - self.last_guard = int(groups[-1]) + # guards can be out of order nowadays + groups = sorted(groups) + self.first_guard = guard_number(groups[0]) + self.last_guard = guard_number(groups[-1]) content = property(get_content, set_content) @@ -197,11 +211,11 @@ _loop.loop_no = no allloops.append(_loop) else: - m = re.search("bridge out of Guard (\d+)", firstline) + m = re.search("bridge out of Guard " + GUARDNO_RE, firstline) assert m - guard_s = 'Guard' + m.group(1) + guard_s = 'Guard' + guard_number_string(m.groups()) split_one_loop(real_loops, guard_s, loop, counter, - int(m.group(1)), allloops) + guard_number(m.groups()), allloops) counter += loop.count("\n") + 2 return real_loops, allloops @@ -211,7 +225,7 @@ memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\('( (.*?))'", loop.content) + m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) if m is None: name = '?' loop.key = '?' 
@@ -236,7 +250,7 @@ content = loop.content loop.content = "Logfile at %d\n" % loop.startlineno + content loop.postprocess(loops, memo, counts) - + def postprocess(loops, allloops, counts): for loop in allloops: if isinstance(loop, Block): From noreply at buildbot.pypy.org Wed Jul 24 18:28:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 18:28:31 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: merge fast-slowpath Message-ID: <20130724162831.CC0AF1C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65621:37a5e2b5b3ea Date: 2013-07-24 18:27 +0200 http://bitbucket.org/pypy/pypy/changeset/37a5e2b5b3ea/ Log: merge fast-slowpath diff too long, truncating to 2000 out of 12721 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -219,32 +282,32 @@ Change Maker, Sweden University of California Berkeley, USA Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' +License for 'rpython/translator/jvm/src/jna.jar' ============================================= -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU +The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU Lesser General Public License of which you can find a copy here: http://www.gnu.org/licenses/lgpl.html -License for 'pypy/translator/jvm/src/jasmin.jar' +License for 'rpython/translator/jvm/src/jasmin.jar' ================================================ -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer +The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer and distributed with permission. The use of Jasmin by PyPy does not imply that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. 
Furthermore, the following disclaimer applies to Jasmin: diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib_pypy/ctypes_config_cache/syslog.ctc.py b/lib_pypy/ctypes_config_cache/syslog.ctc.py deleted file mode 100644 --- a/lib_pypy/ctypes_config_cache/syslog.ctc.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -'ctypes_configure' source for syslog.py. -Run this to rebuild _syslog_cache.py. -""" - -from ctypes_configure.configure import (configure, - ExternalCompilationInfo, ConstantInteger, DefinedConstantInteger) -import dumpcache - - -_CONSTANTS = ( - 'LOG_EMERG', - 'LOG_ALERT', - 'LOG_CRIT', - 'LOG_ERR', - 'LOG_WARNING', - 'LOG_NOTICE', - 'LOG_INFO', - 'LOG_DEBUG', - - 'LOG_PID', - 'LOG_CONS', - 'LOG_NDELAY', - - 'LOG_KERN', - 'LOG_USER', - 'LOG_MAIL', - 'LOG_DAEMON', - 'LOG_AUTH', - 'LOG_LPR', - 'LOG_LOCAL0', - 'LOG_LOCAL1', - 'LOG_LOCAL2', - 'LOG_LOCAL3', - 'LOG_LOCAL4', - 'LOG_LOCAL5', - 'LOG_LOCAL6', - 'LOG_LOCAL7', -) -_OPTIONAL_CONSTANTS = ( - 'LOG_NOWAIT', - 'LOG_PERROR', - - 'LOG_SYSLOG', - 'LOG_CRON', - 'LOG_UUCP', - 'LOG_NEWS', -) - -# Constant aliases if there are not defined -_ALIAS = ( - ('LOG_SYSLOG', 'LOG_DAEMON'), - ('LOG_CRON', 'LOG_DAEMON'), - ('LOG_NEWS', 'LOG_MAIL'), - ('LOG_UUCP', 'LOG_MAIL'), -) - -class SyslogConfigure: - _compilation_info_ = ExternalCompilationInfo(includes=['sys/syslog.h']) -for key in _CONSTANTS: - setattr(SyslogConfigure, key, ConstantInteger(key)) -for key in _OPTIONAL_CONSTANTS: - setattr(SyslogConfigure, key, DefinedConstantInteger(key)) - -config = configure(SyslogConfigure) -for key in _OPTIONAL_CONSTANTS: - if config[key] is None: - del config[key] -for alias, key in _ALIAS: - config.setdefault(alias, config[key]) - -all_constants = config.keys() -all_constants.sort() -config['ALL_CONSTANTS'] = tuple(all_constants) -dumpcache.dumpcache2('syslog', config) diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -46,16 +46,16 @@ if parent is not None: self.parent = parent - def switch(self, *args): + def switch(self, *args, **kwds): "Switch execution to this greenlet, optionally passing the values " "given as argument(s). Returns the value passed when switching back." 
- return self.__switch('switch', args) + return self.__switch('switch', (args, kwds)) def throw(self, typ=GreenletExit, val=None, tb=None): "raise exception in greenlet, return value passed when switching back" return self.__switch('throw', typ, val, tb) - def __switch(target, methodname, *args): + def __switch(target, methodname, *baseargs): current = getcurrent() # while not (target.__main or _continulet.is_pending(target)): @@ -65,9 +65,9 @@ greenlet_func = _greenlet_start else: greenlet_func = _greenlet_throw - _continulet.__init__(target, greenlet_func, *args) + _continulet.__init__(target, greenlet_func, *baseargs) methodname = 'switch' - args = () + baseargs = () target.__started = True break # already done, go to the parent instead @@ -78,11 +78,15 @@ # try: unbound_method = getattr(_continulet, methodname) - args = unbound_method(current, *args, to=target) + args, kwds = unbound_method(current, *baseargs, to=target) finally: _tls.current = current # - if len(args) == 1: + if kwds: + if args: + return args, kwds + return kwds + elif len(args) == 1: return args[0] else: return args @@ -129,14 +133,15 @@ _tls.current = gmain def _greenlet_start(greenlet, args): + args, kwds = args _tls.current = greenlet try: - res = greenlet.run(*args) + res = greenlet.run(*args, **kwds) except GreenletExit, e: res = e finally: _continuation.permute(greenlet, greenlet.parent) - return (res,) + return ((res,), None) def _greenlet_throw(greenlet, exc, value, tb): _tls.current = greenlet diff --git a/lib_pypy/grp.py b/lib_pypy/grp.py --- a/lib_pypy/grp.py +++ b/lib_pypy/grp.py @@ -8,6 +8,7 @@ from ctypes import Structure, c_char_p, c_int, POINTER from ctypes_support import standard_c_lib as libc +import _structseq try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f @@ -23,32 +24,13 @@ ('gr_mem', POINTER(c_char_p)), ) -class Group(object): - def __init__(self, gr_name, gr_passwd, gr_gid, gr_mem): - self.gr_name = gr_name - self.gr_passwd = gr_passwd - self.gr_gid = gr_gid - self.gr_mem = gr_mem +class struct_group: + __metaclass__ = _structseq.structseqtype - def __getitem__(self, item): - if item == 0: - return self.gr_name - elif item == 1: - return self.gr_passwd - elif item == 2: - return self.gr_gid - elif item == 3: - return self.gr_mem - else: - raise IndexError(item) - - def __len__(self): - return 4 - - def __repr__(self): - return str((self.gr_name, self.gr_passwd, self.gr_gid, self.gr_mem)) - - # whatever else... + gr_name = _structseq.structseqfield(0) + gr_passwd = _structseq.structseqfield(1) + gr_gid = _structseq.structseqfield(2) + gr_mem = _structseq.structseqfield(3) libc.getgrgid.argtypes = [gid_t] libc.getgrgid.restype = POINTER(GroupStruct) @@ -71,8 +53,8 @@ while res.contents.gr_mem[i]: mem.append(res.contents.gr_mem[i]) i += 1 - return Group(res.contents.gr_name, res.contents.gr_passwd, - res.contents.gr_gid, mem) + return struct_group((res.contents.gr_name, res.contents.gr_passwd, + res.contents.gr_gid, mem)) @builtinify def getgrgid(gid): diff --git a/lib_pypy/pyrepl/curses.py b/lib_pypy/pyrepl/curses.py --- a/lib_pypy/pyrepl/curses.py +++ b/lib_pypy/pyrepl/curses.py @@ -19,11 +19,15 @@ # CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -# avoid importing the whole curses, if possible -try: +# If we are running on top of pypy, we import only _minimal_curses. +# Don't try to fall back to _curses, because that's going to use cffi +# and fall again more loudly. 
+import sys +if '__pypy__' in sys.builtin_module_names: # pypy case import _minimal_curses as _curses -except ImportError: +else: + # cpython case try: import _curses except ImportError: diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py --- a/lib_pypy/syslog.py +++ b/lib_pypy/syslog.py @@ -1,3 +1,4 @@ +# this cffi version was rewritten based on the # ctypes implementation: Victor Stinner, 2008-05-08 """ This module provides an interface to the Unix syslog library routines. @@ -9,34 +10,84 @@ if sys.platform == 'win32': raise ImportError("No syslog on Windows") -# load the platform-specific cache made by running syslog.ctc.py -from ctypes_config_cache._syslog_cache import * - -from ctypes_support import standard_c_lib as libc -from ctypes import c_int, c_char_p +from cffi import FFI try: from __pypy__ import builtinify except ImportError: builtinify = lambda f: f +ffi = FFI() -# Real prototype is: -# void syslog(int priority, const char *format, ...); -# But we also need format ("%s") and one format argument (message) -_syslog = libc.syslog -_syslog.argtypes = (c_int, c_char_p, c_char_p) -_syslog.restype = None +ffi.cdef(""" +/* mandatory constants */ +#define LOG_EMERG ... +#define LOG_ALERT ... +#define LOG_CRIT ... +#define LOG_ERR ... +#define LOG_WARNING ... +#define LOG_NOTICE ... +#define LOG_INFO ... +#define LOG_DEBUG ... -_openlog = libc.openlog -_openlog.argtypes = (c_char_p, c_int, c_int) -_openlog.restype = None +#define LOG_PID ... +#define LOG_CONS ... +#define LOG_NDELAY ... -_closelog = libc.closelog -_closelog.argtypes = None -_closelog.restype = None +#define LOG_KERN ... +#define LOG_USER ... +#define LOG_MAIL ... +#define LOG_DAEMON ... +#define LOG_AUTH ... +#define LOG_LPR ... +#define LOG_LOCAL0 ... +#define LOG_LOCAL1 ... +#define LOG_LOCAL2 ... +#define LOG_LOCAL3 ... +#define LOG_LOCAL4 ... +#define LOG_LOCAL5 ... +#define LOG_LOCAL6 ... +#define LOG_LOCAL7 ... -_setlogmask = libc.setlogmask -_setlogmask.argtypes = (c_int,) -_setlogmask.restype = c_int +/* optional constants, gets defined to -919919 if missing */ +#define LOG_NOWAIT ... +#define LOG_PERROR ... + +/* aliased constants, gets defined as some other constant if missing */ +#define LOG_SYSLOG ... +#define LOG_CRON ... +#define LOG_UUCP ... +#define LOG_NEWS ... + +/* functions */ +void openlog(const char *ident, int option, int facility); +void syslog(int priority, const char *format, const char *string); +// NB. 
the signature of syslog() is specialized to the only case we use +void closelog(void); +int setlogmask(int mask); +""") + +lib = ffi.verify(""" +#include + +#ifndef LOG_NOWAIT +#define LOG_NOWAIT -919919 +#endif +#ifndef LOG_PERROR +#define LOG_PERROR -919919 +#endif +#ifndef LOG_SYSLOG +#define LOG_SYSLOG LOG_DAEMON +#endif +#ifndef LOG_CRON +#define LOG_CRON LOG_DAEMON +#endif +#ifndef LOG_UUCP +#define LOG_UUCP LOG_MAIL +#endif +#ifndef LOG_NEWS +#define LOG_NEWS LOG_MAIL +#endif +""") + _S_log_open = False _S_ident_o = None @@ -52,12 +103,17 @@ return None @builtinify -def openlog(ident=None, logoption=0, facility=LOG_USER): +def openlog(ident=None, logoption=0, facility=lib.LOG_USER): global _S_ident_o, _S_log_open if ident is None: ident = _get_argv() - _S_ident_o = c_char_p(ident) # keepalive - _openlog(_S_ident_o, logoption, facility) + if ident is None: + _S_ident_o = ffi.NULL + elif isinstance(ident, str): + _S_ident_o = ffi.new("char[]", ident) # keepalive + else: + raise TypeError("'ident' must be a string or None") + lib.openlog(_S_ident_o, logoption, facility) _S_log_open = True @builtinify @@ -69,19 +125,19 @@ # if log is not opened, open it now if not _S_log_open: openlog() - _syslog(priority, "%s", message) + lib.syslog(priority, "%s", message) @builtinify def closelog(): global _S_log_open, S_ident_o if _S_log_open: - _closelog() + lib.closelog() _S_log_open = False _S_ident_o = None @builtinify def setlogmask(mask): - return _setlogmask(mask) + return lib.setlogmask(mask) @builtinify def LOG_MASK(pri): @@ -91,8 +147,15 @@ def LOG_UPTO(pri): return (1 << (pri + 1)) - 1 -__all__ = ALL_CONSTANTS + ( +__all__ = [] + +for name in sorted(lib.__dict__): + if name.startswith('LOG_'): + value = getattr(lib, name) + if value != -919919: + globals()[name] = value + __all__.append(name) + +__all__ = tuple(__all__) + ( 'openlog', 'syslog', 'closelog', 'setlogmask', 'LOG_MASK', 'LOG_UPTO') - -del ALL_CONSTANTS diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() @@ -144,7 +144,7 @@ requires=module_dependencies.get(modname, []), suggests=module_suggests.get(modname, []), negation=modname not in essential_modules, - validator=get_module_validator(modname)) + ) #validator=get_module_validator(modname)) for modname in all_modules]), BoolOption("allworkingmodules", "use as many working modules as possible", diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -907,7 +907,7 @@ runs at application level. If you need to use modules you have to import them within the test function. -Another possibility to pass in data into the AppTest is to use +Data can be passed into the AppTest using the ``setup_class`` method of the AppTest. All wrapped objects that are attached to the class there and start with ``w_`` can be accessed via self (but without the ``w_``) in the actual test method. An example:: @@ -922,6 +922,46 @@ .. 
_`run the tests as usual`: +Another possibility is to use cls.space.appexec, for example:: + + class AppTestSomething(object): + def setup_class(cls): + arg = 2 + cls.w_result = cls.space.appexec([cls.space.wrap(arg)], """(arg): + return arg ** 6 + """) + + def test_power(self): + assert self.result == 2 ** 6 + +which executes the code string function with the given arguments at app level. +Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Here is how to define an app level class in ``setup_class`` that can be used +in subsequent tests:: + + class AppTestSet(object): + def setup_class(cls): + w_fakeint = cls.space.appexec([], """(): + class FakeInt(object): + def __init__(self, value): + self.value = value + def __hash__(self): + return hash(self.value) + + def __eq__(self, other): + if other == self.value: + return True + return False + return FakeInt + """) + cls.w_FakeInt = w_fakeint + + def test_fakeint(self): + f1 = self.FakeInt(4) + assert f1 == 4 + assert hash(f1) == hash(4) + + Command line tool test_all -------------------------- diff --git a/pypy/doc/config/objspace.usemodules._pypyjson.txt b/pypy/doc/config/objspace.usemodules._pypyjson.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._pypyjson.txt @@ -0,0 +1,1 @@ +RPython speedups for the stdlib json module diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -6,171 +6,240 @@ code base, ordered by number of commits (which is certainly not a very appropriate measure but it's something):: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Michael Hudson - Holger Krekel - Benjamin Peterson - Christian Tismer - Hakan Ardo - Alex Gaynor - Eric van Riet Paap - Anders Chrigstrom - David Schneider - Richard Emslie - Dan Villiom Podlaski Christiansen - Alexander Schremmer - Aurelien Campeas - Anders Lehmann - Camillo Bruni - Niklaus Haldimann - Leonardo Santagada - Toon Verwaest - Seo Sanghyeon - Lawrence Oluyede - Bartosz Skowron - Jakub Gustak - Guido Wesdorp - Daniel Roberts - Adrien Di Mascio - Laura Creighton - Ludovic Aubry - Niko Matsakis - Jason Creighton - Jacob Hallen - Alex Martelli - Anders Hammarquist - Jan de Mooij - Wim Lavrijsen - Stephan Diehl - Michael Foord - Stefan Schwarzer - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Alexandre Fayolle - Marius Gedminas - Simon Burton - Justin Peel - Jean-Paul Calderone - John Witulski - Lukas Diekmann - holger krekel - Wim Lavrijsen - Dario Bertini - Andreas Stührk - Jean-Philippe St. 
Pierre - Guido van Rossum - Pavel Vinogradov - Valentino Volonghi - Paul deGrandis - Adrian Kuhn - tav - Georg Brandl - Gerald Klix - Wanja Saatkamp - Ronny Pfannschmidt - Boris Feigin - Oscar Nierstrasz - David Malcolm - Eugene Oden - Henry Mason - Sven Hager - Lukas Renggli - Ilya Osadchiy - Guenter Jantzen - Bert Freudenberg - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Michael Schneider - Nicholas Riley - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Igor Trindade Oliveira - Lucian Branescu Mihaila - Olivier Dormond - Jared Grubb - Karl Bartel - Gabriel Lavoie - Victor Stinner - Brian Dorsey - Stuart Williams - Toby Watson - Antoine Pitrou - Justas Sadzevicius - Neil Shepperd - Mikael Schönenberg - Gasper Zejn - Jonathan David Riehl - Elmo Mäntynen - Anders Qvist - Beatrice During - Alexander Sedov - Timo Paulssen - Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry - Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain - Carl Meyer - Pieter Zieschang - Alejandro J. Cura - Sylvain Thenault - Christoph Gerum - Travis Francis Athougies - Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner - Miguel de Val Borro - Ignas Mikalajunas - Artur Lisiecki - Philip Jenvey - Joshua Gilbert - Godefroid Chappelle - Yusei Tahara - Christopher Armstrong - Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Villiom Podlaski Christiansen - Anders Hammarquist - Chris Lambacher - Dinu Gherman - Dan Colish - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead - Jim Baker - Rodrigo Araújo - Romain Guillebert + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns + Eric van Riet Paap + Richard Emslie + Alexander Schremmer + Wim Lavrijsen + Dan Villiom Podlaski Christiansen + Manuel Jacob + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Ronan Lamy + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Ludovic Aubry + Alexander Hesse + Jacob Hallen + Romain Guillebert + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + Michael Foord + Stephan Diehl + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + stian + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Alexandre Fayolle + Simon Burton + Marius Gedminas + John Witulski + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Konstantin Lopuhin + Andreas Stührk + Jean-Philippe St. 
Pierre + Guido van Rossum + Pavel Vinogradov + Paul deGrandis + Ilya Osadchiy + Adrian Kuhn + Boris Feigin + tav + Georg Brandl + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp + Gerald Klix + Mike Blume + Taavi Burns + Oscar Nierstrasz + David Malcolm + Eugene Oden + Henry Mason + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Tobias Oberstein + Remi Meier + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Stuart Williams + Jasper Schulz + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + Neil Shepperd + Mikael Schönenberg + Elmo Mäntynen + Tobias Pape + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Corbin Simpson + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer + Stephan Busemann + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Jim Baker + Rodrigo Araújo + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -96,8 +96,21 @@ Does PyPy have a GIL? Why? ------------------------------------------------- -Yes, PyPy has a GIL. Removing the GIL is very hard. The first problem -is that our garbage collectors are not re-entrant. +Yes, PyPy has a GIL. Removing the GIL is very hard. The problems are +essentially the same as with CPython (including the fact that our +garbage collectors are not thread-safe so far). Fixing it is possible, +as shown by Jython and IronPython, but difficult. It would require +adapting the whole source code of PyPy, including subtle decisions about +whether some effects are ok or not for the user (i.e. the Python +programmer). 
+ +Instead, since 2012, there is work going on on a still very experimental +Software Transactional Memory (STM) version of PyPy. This should give +an alternative PyPy which internally has no GIL, while at the same time +continuing to give the Python programmer the complete illusion of having +one. It would in fact push forward *more* GIL-ish behavior, like +declaring that some sections of the code should run without releasing +the GIL in the middle (these are called *atomic sections* in STM). ------------------------------------------ How do I write extension modules for PyPy? @@ -306,7 +319,7 @@ No, and you shouldn't try. First and foremost, RPython is a language designed for writing interpreters. It is a restricted subset of -Python. If you program is not an interpreter but tries to do "real +Python. If your program is not an interpreter but tries to do "real things", like use *any* part of the standard Python library or *any* 3rd-party library, then it is not RPython to start with. You should only look at RPython if you try to `write your own interpreter`__. @@ -322,8 +335,35 @@ Yes, it is possible with enough effort to compile small self-contained pieces of RPython code doing a few performance-sensitive things. But this case is not interesting for us. If you needed to rewrite the code -in RPython, you could as well have rewritten it in C for example. The -latter is a much more supported, much more documented language `:-)` +in RPython, you could as well have rewritten it in C or C++ or Java for +example. These are much more supported, much more documented languages +`:-)` + + *The above paragraphs are not the whole truth. It* is *true that there + are cases where writing a program as RPython gives you substantially + better speed than running it on top of PyPy. However, the attitude of + the core group of people behind PyPy is to answer: "then report it as a + performance bug against PyPy!".* + + *Here is a more diluted way to put it. The "No, don't!" above is a + general warning we give to new people. They are likely to need a lot + of help from* some *source, because RPython is not so simple nor + extensively documented; but at the same time, we, the pypy core group + of people, are not willing to invest time in supporting 3rd-party + projects that do very different things than interpreters for dynamic + languages --- just because we have other interests and there are only + so many hours a day. So as a summary I believe it is only fair to + attempt to point newcomers at existing alternatives, which are more + mainstream and where they will get help from many people.* + + *If anybody seriously wants to promote RPython anyway, he is welcome + to: we won't actively resist such a plan. There are a lot of things + that could be done to make RPython a better Java-ish language for + example, starting with supporting non-GIL-based multithreading, but we + don't implement them because they have little relevance to us. This + is open source, which means that anybody is free to promote and + develop anything; but it also means that you must let us choose* not + *to go into that direction ourselves.* --------------------------------------------------- Which backends are there for the RPython toolchain? diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -77,3 +77,4 @@ entry point. .. _`introduction to RPython`: getting-started-dev.html +.. 
_`pytest`: http://pytest.org/ diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -32,11 +32,10 @@ * go to pypy/tool/release and run: force-builds.py /release/ * wait for builds to complete, make sure there are no failures -* run pypy/tool/release/make_release.py, this will build necessary binaries - and upload them to pypy.org +* upload binaries to https://bitbucket.org/pypy/pypy/downloads Following binaries should be built, however, we need more buildbots: - JIT: windows, linux, os/x + JIT: windows, linux, os/x, armhf, armel no JIT: windows, linux, os/x sandbox: linux, os/x diff --git a/pypy/doc/release-2.1.0-beta1.rst b/pypy/doc/release-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta1.rst @@ -0,0 +1,72 @@ +=============== +PyPy 2.1 beta 1 +=============== + +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy. +This beta contains many bugfixes and improvements, numerous improvements to the +numpy in pypy effort. The main feature being that the ARM processor support is +not longer considered alpha level. We would like to thank the `Raspberry Pi +Foundation`_ for supporting the work to finish PyPy's ARM support. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +Highlights +========== + +* Bugfixes to the ARM JIT backend, so that ARM is now an officially + supported processor architecture + +* Stacklet support on ARM + +* Interpreter improvements + +* Various numpy improvements + +* Bugfixes to cffi and ctypes + +* Bugfixes to the stacklet support + +* Improved logging performance + +* Faster sets for objects + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. 
_`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-2.1.rst @@ -0,0 +1,78 @@ +====================== +What's new in PyPy 2.1 +====================== + +.. this is a revision shortly after release-2.0 +.. startrev: a13c07067613 + +.. branch: ndarray-ptp +put and array.put + +.. branch: numpy-pickle +Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support + +.. branch: remove-tuple-smm +Remove multi-methods on tuple + +.. branch: remove-iter-smm +Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects + +.. branch: argsort-segfault +Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) + +.. branch: dtype-isnative +.. branch: ndarray-round + +.. branch: faster-str-of-bigint +Improve performance of str(long). + +.. branch: ndarray-view +Add view to ndarray and zeroD arrays, not on dtype scalars yet + +.. branch: numpypy-segfault +fix segfault caused by iterating over empty ndarrays + +.. branch: identity-set +Faster sets for objects + +.. branch: inline-identityhash +Inline the fast path of id() and hash() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,58 +2,13 @@ What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0 -.. startrev: a13c07067613 +.. this is a revision shortly after release-2.1-beta +.. startrev: 4eb52818e7c0 -.. branch: numpy-pickle -Pickling of numpy arrays and dtypes (including record dtypes) +.. branch: fastjson +Fast json decoder written in RPython, about 3-4x faster than the pure Python +decoder which comes with the stdlib -.. branch: remove-array-smm -Remove multimethods in the arraymodule - -.. branch: callback-stacklet -Fixed bug when switching stacklets from a C callback - -.. branch: remove-set-smm -Remove multi-methods on sets - -.. branch: numpy-subarrays -Implement subarrays for numpy - -.. branch: remove-dict-smm -Remove multi-methods on dict - -.. branch: remove-list-smm-2 -Remove remaining multi-methods on list - -.. branch: arm-stacklet -Stacklet support for ARM, enables _continuation support - -.. branch: remove-tuple-smm -Remove multi-methods on tuple - -.. branch: remove-iter-smm -Remove multi-methods on iterators - -.. branch: emit-call-x86 -.. branch: emit-call-arm - -.. branch: on-abort-resops -Added list of resops to the pypyjit on_abort hook. - -.. 
branch: logging-perf -Speeds up the stdlib logging module - -.. branch: operrfmt-NT -Adds a couple convenient format specifiers to operationerrfmt - -.. branch: win32-fixes3 -Skip and fix some non-translated (own) tests for win32 builds - -.. branch: ctypes-byref -Add the '_obj' attribute on ctypes pointer() and byref() objects - -.. branch: argsort-segfault -Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) - -.. branch: dtype-isnative +.. branch: improve-str2charp +Improve the performance of I/O writing up to 15% by using memcpy instead of +copying char-by-char in str2charp and get_nonmovingbuffer diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -881,15 +881,15 @@ assert "0 ('hi')" not in output.getvalue() def test_print_to(self): - exec """if 1: - from StringIO import StringIO - s = StringIO() - print >> s, "hi", "lovely!" - assert s.getvalue() == "hi lovely!\\n" - s = StringIO() - print >> s, "hi", "lovely!", - assert s.getvalue() == "hi lovely!" - """ in {} + exec """if 1: + from StringIO import StringIO + s = StringIO() + print >> s, "hi", "lovely!" + assert s.getvalue() == "hi lovely!\\n" + s = StringIO() + print >> s, "hi", "lovely!", + assert s.getvalue() == "hi lovely!" + """ in {} def test_assert_with_tuple_arg(self): try: diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -29,12 +29,12 @@ _application_traceback = None def __init__(self, w_type, w_value, tb=None): - assert w_type is not None self.setup(w_type) self._w_value = w_value self._application_traceback = tb def setup(self, w_type): + assert w_type is not None self.w_type = w_type if not we_are_translated(): self.debug_excs = [] @@ -347,7 +347,6 @@ self.xstrings = strings for i, _, attr in entries: setattr(self, attr, args[i]) - assert w_type is not None def _compute_value(self, space): lst = [None] * (len(formats) + len(formats) + 1) @@ -369,6 +368,18 @@ _fmtcache2[formats] = OpErrFmt return OpErrFmt, strings +class OpErrFmtNoArgs(OperationError): + + def __init__(self, w_type, value): + self.setup(w_type) + self._value = value + + def get_w_value(self, space): + w_value = self._w_value + if w_value is None: + self._w_value = w_value = space.wrap(self._value) + return w_value + def get_operationerr_class(valuefmt): try: result = _fmtcache[valuefmt] @@ -389,6 +400,8 @@ %T - The result of space.type(w_arg).getname(space) """ + if not len(args): + return OpErrFmtNoArgs(w_type, valuefmt) OpErrFmt, strings = get_operationerr_class(valuefmt) return OpErrFmt(w_type, strings, *args) operationerrfmt._annspecialcase_ = 'specialize:arg(1)' diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -347,7 +347,7 @@ (self.use(typ), self.nextarg())) def visit__ObjSpace(self, el): - if self.finger != 0: + if self.finger > 1: raise FastFuncNotSupported self.unwrap.append("space") @@ -414,21 +414,21 @@ mod = "" if mod == 'pypy.interpreter.astcompiler.ast': raise FastFuncNotSupported - if (not mod.startswith('pypy.module.__builtin__') and - not mod.startswith('pypy.module.sys') and - not mod.startswith('pypy.module.math')): - if not func.__name__.startswith('descr'): - raise FastFuncNotSupported + #if (not mod.startswith('pypy.module.__builtin__') and + # not 
mod.startswith('pypy.module.sys') and + # not mod.startswith('pypy.module.math')): + # if not func.__name__.startswith('descr'): + # raise FastFuncNotSupported d = {} unwrap_info.miniglobals['func'] = func source = """if 1: def fastfunc_%s_%d(%s): return func(%s) - \n""" % (func.__name__, narg, + \n""" % (func.__name__.replace('-', '_'), narg, ', '.join(args), ', '.join(unwrap_info.unwrap)) exec compile2(source) in unwrap_info.miniglobals, d - fastfunc = d['fastfunc_%s_%d' % (func.__name__, narg)] + fastfunc = d['fastfunc_%s_%d' % (func.__name__.replace('-', '_'), narg)] return narg, fastfunc make_fastfunc = staticmethod(make_fastfunc) diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -111,7 +111,7 @@ enc = None if need_encoding: - enc = encoding + enc = encoding v = PyString_DecodeEscape(space, substr, enc) return space.wrap(v) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -33,6 +33,14 @@ operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ +def test_operationerrfmt_noargs(space): + operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") + operr.normalize_exception(space) + val = operr.get_w_value(space) + assert space.isinstance_w(val, space.w_AttributeError) + w_repr = space.repr(val) + assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" + def test_operationerrfmt_T(space): operr = operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -636,11 +636,11 @@ assert fn.code.fast_natural_arity == i|PyCode.FLATPYCALL if i < 5: - def bomb(*args): - assert False, "shortcutting should have avoided this" + def bomb(*args): + assert False, "shortcutting should have avoided this" - code.funcrun = bomb - code.funcrun_obj = bomb + code.funcrun = bomb + code.funcrun_obj = bomb args_w = map(space.wrap, range(i)) w_res = space.call_function(fn, *args_w) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -597,6 +597,32 @@ assert space.is_true(w_res) assert called == [w_app_f, w_app_f] + def test_interp2app_fastcall_method_with_space(self): + class W_X(W_Root): + def descr_f(self, space, w_x): + return w_x + + app_f = gateway.interp2app_temp(W_X.descr_f, unwrap_spec=['self', + gateway.ObjSpace, gateway.W_Root]) + + w_app_f = self.space.wrap(app_f) + + assert isinstance(w_app_f.code, gateway.BuiltinCode2) + + called = [] + fastcall_2 = w_app_f.code.fastcall_2 + def witness_fastcall_2(space, w_func, w_a, w_b): + called.append(w_func) + return fastcall_2(space, w_func, w_a, w_b) + + w_app_f.code.fastcall_2 = witness_fastcall_2 + space = self.space + + w_res = space.call_function(w_app_f, W_X(), space.wrap(3)) + + assert space.is_true(w_res) + assert called == [w_app_f] + def test_plain(self): space = self.space diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -73,7 +73,7 @@ 
def f(): def f(y): - return x + y + return x + y return f x = 1 @@ -85,7 +85,7 @@ if n: x = 42 def f(y): - return x + y + return x + y return f g0 = f(0).func_closure[0] diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -102,26 +102,26 @@ } def pick_builtin(self, w_globals): - "Look up the builtin module to use from the __builtins__ global" - # pick the __builtins__ roughly in the same way CPython does it - # this is obscure and slow - space = self.space - try: - w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) - except OperationError, e: - if not e.match(space, space.w_KeyError): - raise - else: - if w_builtin is space.builtin: # common case - return space.builtin - if space.isinstance_w(w_builtin, space.w_dict): + "Look up the builtin module to use from the __builtins__ global" + # pick the __builtins__ roughly in the same way CPython does it + # this is obscure and slow + space = self.space + try: + w_builtin = space.getitem(w_globals, space.wrap('__builtins__')) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + else: + if w_builtin is space.builtin: # common case + return space.builtin + if space.isinstance_w(w_builtin, space.w_dict): return module.Module(space, None, w_builtin) - if isinstance(w_builtin, module.Module): - return w_builtin - # no builtin! make a default one. Give them None, at least. - builtin = module.Module(space, None) - space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) - return builtin + if isinstance(w_builtin, module.Module): + return w_builtin + # no builtin! make a default one. Give them None, at least. + builtin = module.Module(space, None) + space.setitem(builtin.w_dict, space.wrap('None'), space.w_None) + return builtin def setup_after_space_initialization(self): """NOT_RPYTHON""" diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -41,8 +41,8 @@ def save_field(self, field_builder): field = field_builder.build() if self.numeric_field: - from pypy.objspace.std.strutil import ParseStringError - from pypy.objspace.std.strutil import string_to_float + from rpython.rlib.rstring import ParseStringError + from rpython.rlib.rfloat import string_to_float self.numeric_field = False try: ff = string_to_float(field) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -101,6 +101,9 @@ raise OperationError( space.w_ValueError, space.wrap(message)) + def check_closed_w(self, space): + self._check_closed(space) + def closed_get_w(self, space): return space.newbool(self.__IOBase_closed) @@ -277,6 +280,7 @@ _checkReadable = interp2app(check_readable_w), _checkWritable = interp2app(check_writable_w), _checkSeekable = interp2app(check_seekable_w), + _checkClosed = interp2app(W_IOBase.check_closed_w), closed = GetSetProperty(W_IOBase.closed_get_w), __dict__ = GetSetProperty(descr_get_dict, descr_set_dict, cls=W_IOBase), __weakref__ = make_weakref_descr(W_IOBase), diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -22,7 +22,9 @@ import io with io.BufferedIOBase() as f: assert not f.closed + f._checkClosed() assert f.closed + raises(ValueError, f._checkClosed) def 
test_iter(self): import io diff --git a/pypy/module/_minimal_curses/fficurses.py b/pypy/module/_minimal_curses/fficurses.py --- a/pypy/module/_minimal_curses/fficurses.py +++ b/pypy/module/_minimal_curses/fficurses.py @@ -52,7 +52,8 @@ HAS = rffi_platform.Has("setupterm") if rffi_platform.configure(CConfig)['HAS']: return eci - raise ImportError("failed to guess where ncurses is installed") + raise ImportError("failed to guess where ncurses is installed. " + "You might need to install libncurses5-dev or similar.") eci = guess_eci() diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py new file mode 100644 From noreply at buildbot.pypy.org Wed Jul 24 18:53:27 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 24 Jul 2013 18:53:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Documented some branches. Message-ID: <20130724165327.879D31C14BB@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65622:f60bf1df148b Date: 2013-07-24 09:52 -0700 http://bitbucket.org/pypy/pypy/changeset/f60bf1df148b/ Log: Documented some branches. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,9 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. From noreply at buildbot.pypy.org Wed Jul 24 19:22:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 19:22:17 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: kill teh need for force_virtualizable Message-ID: <20130724172217.CD9961C13EE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65623:ea276f1cdcea Date: 2013-07-24 19:20 +0200 http://bitbucket.org/pypy/pypy/changeset/ea276f1cdcea/ Log: kill teh need for force_virtualizable diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,7 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass +from rpython.rtyper.lltypesystem import lltype, rclass, rffi @@ -731,11 +731,22 @@ self.metainterp.replace_box(box, standard_box) return False if not self.metainterp.heapcache.is_unescaped(box): - self.metainterp.execute_and_record(rop.FORCE_VIRTUALIZABLE, - fielddescr, box) + self.emit_force_virtualizable(fielddescr, box) self.metainterp.heapcache.nonstandard_virtualizables_now_known(box) return True + def emit_force_virtualizable(self, fielddescr, box): + vinfo = fielddescr.get_vinfo() + token_descr = vinfo.vable_token_descr + mi = self.metainterp + tokenbox = mi.execute_and_record(rop.GETFIELD_GC, token_descr, box) + condbox = mi.execute_and_record(rop.PTR_NE, None, tokenbox, + history.CONST_NULL) + funcbox = ConstInt(rffi.cast(lltype.Signed, vinfo.clear_vable_ptr)) + calldescr = vinfo.clear_vable_descr + self.execute_varargs(rop.COND_CALL, [condbox, funcbox, box], + calldescr, False, False) + def _get_virtualizable_field_index(self, fielddescr): # Get the index of a fielddescr. Must only be called for # the "standard" virtualizable. 
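Roughly speaking, the operation sequence emitted by emit_force_virtualizable() above (a GETFIELD_GC of the vable token, a PTR_NE against the NULL constant, then a COND_CALL guarded by that condition) encodes the following runtime check. This is an illustrative sketch with simplified, hypothetical names; the real code works on boxes and descrs rather than plain attributes:

    def force_if_needed(vable, clear_vable_token, NULL=None):
        token = vable.vable_token        # GETFIELD_GC with vinfo.vable_token_descr
        if token != NULL:                # PTR_NE(token, CONST_NULL) feeding COND_CALL
            clear_vable_token(vable)     # the function behind vinfo.clear_vable_ptr

In other words, the helper that clears the vable token is only called when the token is actually set, instead of recording the dedicated FORCE_VIRTUALIZABLE operation as the old code did.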
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -202,8 +202,8 @@ return xy.inst_x res = self.meta_interp(f, [20]) assert res == 134 - self.check_simple_loop(setfield_gc=1, getfield_gc=0, force_virtualizable=1) - self.check_resops(setfield_gc=2, getfield_gc=3) + self.check_simple_loop(setfield_gc=1, getfield_gc=0, cond_call=1) + self.check_resops(setfield_gc=2, getfield_gc=4) # ------------------------------ @@ -384,7 +384,7 @@ res = self.meta_interp(f, [20], enable_opts='') assert res == expected self.check_simple_loop(setarrayitem_gc=1, setfield_gc=0, - getarrayitem_gc=1, arraylen_gc=1, getfield_gc=1) + getarrayitem_gc=1, arraylen_gc=1, getfield_gc=2) # ------------------------------ From noreply at buildbot.pypy.org Wed Jul 24 19:42:56 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 19:42:56 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: kill a short-lived resop Message-ID: <20130724174256.350D01C14BB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65624:7a884a450023 Date: 2013-07-24 19:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7a884a450023/ Log: kill a short-lived resop diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -182,7 +182,7 @@ def arraydescrof(self, A): raise NotImplementedError - def calldescrof(self, FUNC, ARGS, RESULT): + def calldescrof(self, FUNC, ARGS, RESULT, extrainfo): # FUNC is the original function type, but ARGS is a list of types # with Voids removed raise NotImplementedError @@ -298,11 +298,6 @@ def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length): raise NotImplementedError - def bh_force_virtualizable(self, v, descr): - vinfo = descr.get_vinfo() - if vinfo is not None: - vinfo.clear_vable_token(v) - class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -314,8 +314,7 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.xrm.position = i - if (op.has_no_side_effect() and op.result not in self.longevity - and op.opnum != rop.FORCE_VIRTUALIZABLE): + if op.has_no_side_effect() and op.result not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -865,18 +864,6 @@ gc_ll_descr.get_nursery_top_addr(), sizeloc, gcmap) - def consider_force_virtualizable(self, op): - # just do a call for now - vinfo = op.getdescr().get_vinfo() - if vinfo is None: - return # for tests - calldescr = vinfo.clear_vable_descr - assert isinstance(calldescr, CallDescr) - fval = rffi.cast(lltype.Signed, vinfo.clear_vable_ptr) - op = ResOperation(rop.CALL, [ConstInt(fval), op.getarg(0)], None, - descr=calldescr) - self.consider_call(op) - def consider_call_malloc_nursery_varsize(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr if not hasattr(gc_ll_descr, 'max_size_of_young_obj'): diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -84,6 +84,7 @@ OS_UNI_COPY_TO_RAW = 113 OS_JIT_FORCE_VIRTUAL = 120 + OS_JIT_FORCE_VIRTUALIZABLE = 121 # for debugging: _OS_CANRAISE 
= set([ diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1320,10 +1320,6 @@ from rpython.jit.metainterp import quasiimmut quasiimmut.do_force_quasi_immutable(cpu, struct, mutatefielddescr) - @arguments("cpu", "r", "d") - def bhimpl_force_virtualizable(cpu, v, descr): - cpu.bh_force_virtualizable(v, descr) - @arguments("r") def bhimpl_hint_force_virtualizable(r): pass diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -281,9 +281,6 @@ def do_keepalive(cpu, _, x): pass -def do_force_virtualizable(cpu, _, v, descr): - cpu.bh_force_virtualizable(v.getref_base(), descr) - # ____________________________________________________________ ##def do_force_token(cpu): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -7090,7 +7090,7 @@ ops = """ [i0] p1 = new_with_vtable(ConstClass(node_vtable)) - force_virtualizable(p1) + cond_call(1, 123, p1, descr=clear_vable) jump(i0) """ expected = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -256,12 +256,19 @@ asmdescr = LoopToken() # it can be whatever, it's not a descr though from rpython.jit.metainterp.virtualref import VirtualRefInfo + class FakeWarmRunnerDesc: pass FakeWarmRunnerDesc.cpu = cpu vrefinfo = VirtualRefInfo(FakeWarmRunnerDesc) virtualtokendescr = vrefinfo.descr_virtual_token virtualforceddescr = vrefinfo.descr_forced + FUNC = lltype.FuncType([], lltype.Void) + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + clear_vable = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, ei) + jit_virtual_ref_vtable = vrefinfo.jit_virtual_ref_vtable jvr_vtable_adr = llmemory.cast_ptr_to_adr(jit_virtual_ref_vtable) @@ -447,7 +454,7 @@ preamble.inputargs = inputargs preamble.resume_at_jump_descr = FakeDescrWithSnapshot() - token = JitCellToken() + token = JitCellToken() preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ [ResOperation(rop.LABEL, jump_args, None, descr=token)] @@ -460,7 +467,7 @@ loop.operations = [preamble.operations[-1]] + \ [inliner.inline_op(op, clone=False) for op in cloned_operations] + \ [ResOperation(rop.JUMP, [inliner.inline_arg(a) for a in jump_args], - None, descr=token)] + None, descr=token)] #[inliner.inline_op(jumpop)] assert loop.operations[-1].getopnum() == rop.JUMP assert loop.operations[0].getopnum() == rop.LABEL @@ -479,7 +486,7 @@ preamble.operations.insert(-1, op) return preamble - + class FakeDescr(compile.ResumeGuardDescr): def clone_if_mutable(self): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -543,12 +543,6 @@ else: self.emit_operation(op) - def optimize_FORCE_VIRTUALIZABLE(self, op): - val = self.getvalue(op.getarg(0)) - if val.is_virtual(): - return 
- self.emit_operation(op) - def optimize_CALL_MAY_FORCE(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -557,6 +551,15 @@ return self.emit_operation(op) + def optimize_COND_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: + value = self.getvalue(op.getarg(2)) + if value.is_virtual(): + return + self.emit_operation(op) + def optimize_VIRTUAL_REF(self, op): # get some constants vrefinfo = self.optimizer.metainterp_sd.virtualref_info diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -491,7 +491,6 @@ 'NEWSTR/1', 'NEWUNICODE/1', '_MALLOC_LAST', - 'FORCE_VIRTUALIZABLE/1d', # forces a non-standard virtualizable '_CANMALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py --- a/rpython/jit/metainterp/test/test_recursive.py +++ b/rpython/jit/metainterp/test/test_recursive.py @@ -796,7 +796,7 @@ return frame.thing.val res = self.meta_interp(main, [0], inline=True) - self.check_resops(force_virtualizable=2) + self.check_resops(cond_call=2) assert res == main(0) def test_directly_call_assembler_virtualizable_reset_token(self): diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py --- a/rpython/jit/metainterp/virtualizable.py +++ b/rpython/jit/metainterp/virtualizable.py @@ -300,9 +300,12 @@ self.clear_vable_ptr = self.warmrunnerdesc.helper_func( FUNCPTR, self.clear_vable_token) FUNC = FUNCPTR.TO + ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False, + oopspecindex=EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE) + self.clear_vable_descr = self.cpu.calldescrof(FUNC, FUNC.ARGS, - FUNC.RESULT, - EffectInfo.LEAST_GENERAL) + FUNC.RESULT, ei) def unwrap_virtualizable_box(self, virtualizable_box): return virtualizable_box.getref(llmemory.GCREF) From noreply at buildbot.pypy.org Wed Jul 24 21:09:29 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 21:09:29 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove all "raise NotImplementedError"-methods in StringMethods. Message-ID: <20130724190929.895031C02A1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65625:8418bdebeade Date: 2013-07-24 21:07 +0200 http://bitbucket.org/pypy/pypy/changeset/8418bdebeade/ Log: Remove all "raise NotImplementedError"-methods in StringMethods. 
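The methods removed below are pure placeholders: StringMethods is an RPython mixin (note the _mixin_ = True in the class body), so each concrete string-like class is expected to supply helpers such as _val() and _new() itself. A minimal sketch of that pattern, using made-up class and method names around the real _val()/_new() hooks:

    class StringMethods(object):
        _mixin_ = True
        def double(self, space):
            value = self._val(space)         # supplied by the concrete class
            return self._new(value + value)  # likewise supplied by it

    class W_MyString(StringMethods):
        def __init__(self, s):
            self._s = s
        def _val(self, space):
            return self._s
        def _new(self, s):
            return W_MyString(s)

With the placeholder definitions gone, forgetting one of these helpers surfaces as a missing attribute rather than as a NotImplementedError raised at run time.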
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -12,18 +12,6 @@ class StringMethods(object): _mixin_ = True - def _new(self, value): - raise NotImplementedError - - def _len(self): - raise NotImplementedError - - def _val(self, space): - raise NotImplementedError - - def _op_val(self, space, w_other): - raise NotImplementedError - def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 @@ -411,12 +399,6 @@ sb.append(self._op_val(space, list_w[i])) return self._new(sb.build()) - def _join_return_one(self, space, w_obj): - raise NotImplementedError - - def _join_check_item(self, space, w_obj): - raise NotImplementedError - def _join_autoconvert(self, space, list_w): assert False, 'unreachable' From noreply at buildbot.pypy.org Wed Jul 24 21:21:38 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 21:21:38 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: a crucial optimizations Message-ID: <20130724192138.898871C02A1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65626:840cbb52a15f Date: 2013-07-24 21:21 +0200 http://bitbucket.org/pypy/pypy/changeset/840cbb52a15f/ Log: a crucial optimizations diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -250,7 +250,7 @@ # This is only safe if the class of the guard_value matches the # class of the guard_*_class, otherwise the intermediate ops might # be executed with wrong classes. - previous_classbox = value.get_constant_class(self.optimizer.cpu) + previous_classbox = value.get_constant_class(self.optimizer.cpu) expected_classbox = self.optimizer.cpu.ts.cls_of_box(op.getarg(1)) assert previous_classbox is not None assert expected_classbox is not None @@ -343,6 +343,15 @@ resvalue = self.getvalue(op.result) self.loop_invariant_results[key] = resvalue + def optimize_COND_CALL(self, op): + arg = op.getarg(0) + val = self.getvalue(arg) + if val.is_constant(): + if val.box.same_constant(CONST_0): + return + op = op.copy_and_change(rop.CALL, args=op.getarglist()[1:]) + self.emit_operation(op) + def _optimize_nullness(self, op, box, expect_nonnull): value = self.getvalue(box) if value.is_nonnull(): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -54,7 +54,7 @@ expected_short = self.parse(expected_short) preamble = self.unroll_and_optimize(loop, call_pure_results) - + # print print "Preamble:" @@ -219,7 +219,7 @@ """ self.optimize_loop(ops, expected) - def test_reverse_of_cast_2(self): + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1290,7 +1290,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - p46 = same_as(p30) # This same_as should be killed by backend + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -2582,7 +2582,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - i101 = same_as(i4) + i101 = same_as(i4) jump(p1, i2, i4, 
p4, i101) """ expected = """ @@ -3440,7 +3440,7 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call_assembler(i1, descr=asmdescr) setfield_gc(p1, i3, descr=valuedescr) - i143 = same_as(i3) # Should be killed by backend + i143 = same_as(i3) # Should be killed by backend jump(p1, i4, i3) ''' self.optimize_loop(ops, ops, preamble) @@ -3551,7 +3551,7 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - i155 = same_as(i4) + i155 = same_as(i4) jump(i0, i4, i155) ''' expected = ''' @@ -6000,7 +6000,7 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - i5 = same_as(i4) + i5 = same_as(i4) jump(p1, i1, i2, i4, i5) """ expected = """ @@ -7258,7 +7258,7 @@ [i0] i2 = int_lt(i0, 10) guard_true(i2) [] - i1 = int_add(i0, 1) + i1 = int_add(i0, 1) jump(i1) """ self.optimize_loop(ops, expected) @@ -7976,7 +7976,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + def test_constant_failargs(self): ops = """ [p1, i2, i3] @@ -8057,7 +8057,7 @@ jump() """ self.optimize_loop(ops, expected) - + def test_issue1080_infinitie_loop_simple(self): ops = """ @@ -8089,8 +8089,8 @@ def test_licm_boxed_opaque_getitem(self): ops = """ [p1] - p2 = getfield_gc(p1, descr=nextdescr) - mark_opaque_ptr(p2) + p2 = getfield_gc(p1, descr=nextdescr) + mark_opaque_ptr(p2) guard_class(p2, ConstClass(node_vtable)) [] i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) @@ -8106,8 +8106,8 @@ def test_licm_boxed_opaque_getitem_unknown_class(self): ops = """ [p1] - p2 = getfield_gc(p1, descr=nextdescr) - mark_opaque_ptr(p2) + p2 = getfield_gc(p1, descr=nextdescr) + mark_opaque_ptr(p2) i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p1) @@ -8123,7 +8123,7 @@ def test_licm_unboxed_opaque_getitem(self): ops = """ [p2] - mark_opaque_ptr(p2) + mark_opaque_ptr(p2) guard_class(p2, ConstClass(node_vtable)) [] i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) @@ -8139,22 +8139,20 @@ def test_licm_unboxed_opaque_getitem_unknown_class(self): ops = """ [p2] - mark_opaque_ptr(p2) + mark_opaque_ptr(p2) i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p2) """ expected = """ [p2] - i3 = getfield_gc(p2, descr=otherdescr) + i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p2) """ self.optimize_loop(ops, expected) - - - def test_only_strengthen_guard_if_class_matches(self): + def test_only_strengthen_guard_if_class_matches_2(self): ops = """ [p1] guard_class(p1, ConstClass(node_vtable2)) [] @@ -8164,6 +8162,30 @@ self.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_cond_call_with_a_constant(self): + ops = """ + [p1] + cond_call(1, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + call(123, p1, descr=plaincalldescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + + def test_cond_call_with_a_constant_2(self): + ops = """ + [p1] + cond_call(0, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + jump(p1) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Wed Jul 24 21:24:12 2013 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 24 Jul 2013 21:24:12 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: merge fast-slowpath Message-ID: <20130724192412.CE6BC1C02A1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65627:4d3199d86bfc Date: 2013-07-24 21:23 +0200 
http://bitbucket.org/pypy/pypy/changeset/4d3199d86bfc/ Log: merge fast-slowpath diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -250,7 +250,7 @@ # This is only safe if the class of the guard_value matches the # class of the guard_*_class, otherwise the intermediate ops might # be executed with wrong classes. - previous_classbox = value.get_constant_class(self.optimizer.cpu) + previous_classbox = value.get_constant_class(self.optimizer.cpu) expected_classbox = self.optimizer.cpu.ts.cls_of_box(op.getarg(1)) assert previous_classbox is not None assert expected_classbox is not None @@ -343,6 +343,15 @@ resvalue = self.getvalue(op.result) self.loop_invariant_results[key] = resvalue + def optimize_COND_CALL(self, op): + arg = op.getarg(0) + val = self.getvalue(arg) + if val.is_constant(): + if val.box.same_constant(CONST_0): + return + op = op.copy_and_change(rop.CALL, args=op.getarglist()[1:]) + self.emit_operation(op) + def _optimize_nullness(self, op, box, expect_nonnull): value = self.getvalue(box) if value.is_nonnull(): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8165,9 +8165,7 @@ """ self.optimize_loop(ops, expected) - - - def test_only_strengthen_guard_if_class_matches(self): + def test_only_strengthen_guard_if_class_matches_2(self): ops = """ [p1] guard_class(p1, ConstClass(node_vtable2)) [] @@ -8177,6 +8175,30 @@ self.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_cond_call_with_a_constant(self): + ops = """ + [p1] + cond_call(1, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + call(123, p1, descr=plaincalldescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + + def test_cond_call_with_a_constant_2(self): + ops = """ + [p1] + cond_call(0, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + jump(p1) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass From noreply at buildbot.pypy.org Wed Jul 24 21:55:11 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 21:55:11 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Pass StringMethods._is_generic() the name of the function instead of the function. Message-ID: <20130724195511.098051C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65628:bbf730de6201 Date: 2013-07-24 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/bbf730de6201/ Log: Pass StringMethods._is_generic() the name of the function instead of the function. 
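(Editorial sketch of the pattern introduced by the changeset below; it is not the real PyPy code. In the actual diff the decorator is RPython's @specialize.arg(2) from rpython.rlib.objectmodel and the signature is _is_generic(self, space, func_name); here a no-op stand-in decorator and simplified signatures are used so the snippet runs as plain Python. The idea: each call site passes a constant string such as '_isdigit', getattr() recovers the bound predicate, and specialize.arg lets the annotator emit one specialized copy of _is_generic per constant name.)

def specialize_arg(n):
    # plain-Python stand-in for rpython.rlib.objectmodel.specialize.arg(n)
    def decorator(func):
        return func
    return decorator

class Checks(object):
    def _isdigit(self, ch):
        return ch.isdigit()

    def _isalpha(self, ch):
        return ch.isalpha()

    @specialize_arg(1)   # argument 1 (func_name) is a compile-time constant at each call site
    def _is_generic(self, func_name, value):
        func = getattr(self, func_name)
        if not value:
            return False
        for ch in value:
            if not func(ch):
                return False
        return True

    def descr_isdigit(self, value):
        return self._is_generic('_isdigit', value)

    def descr_isalpha(self, value):
        return self._is_generic('_isalpha', value)

print(Checks().descr_isdigit("123"))   # True
print(Checks().descr_isalpha("12a"))   # False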
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -277,31 +277,32 @@ return space.wrap(res) @specialize.arg(2) - def _is_generic(self, space, fun): + def _is_generic(self, space, func_name): + func = getattr(self, func_name) v = self._val(space) if len(v) == 0: return space.w_False if len(v) == 1: c = v[0] - return space.newbool(fun(c)) + return space.newbool(func(c)) else: - return self._is_generic_loop(space, v, fun) + return self._is_generic_loop(space, v, func) @specialize.arg(3) - def _is_generic_loop(self, space, v, fun): + def _is_generic_loop(self, space, v, func): for idx in range(len(v)): - if not fun(v[idx]): + if not func(v[idx]): return space.w_False return space.w_True def descr_isalnum(self, space): - return self._is_generic(space, self._isalnum) + return self._is_generic(space, '_isalnum') def descr_isalpha(self, space): - return self._is_generic(space, self._isalpha) + return self._is_generic(space, '_isalpha') def descr_isdigit(self, space): - return self._is_generic(space, self._isdigit) + return self._is_generic(space, '_isdigit') def descr_islower(self, space): v = self._val(space) @@ -317,7 +318,7 @@ return space.newbool(cased) def descr_isspace(self, space): - return self._is_generic(space, self._isspace) + return self._is_generic(space, '_isspace') def descr_istitle(self, space): input = self._val(space) From noreply at buildbot.pypy.org Wed Jul 24 22:05:30 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 24 Jul 2013 22:05:30 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Do the same with _is_generic_loop. Message-ID: <20130724200530.676F01C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65629:7d2f6f9ef392 Date: 2013-07-24 22:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2f6f9ef392/ Log: Do the same with _is_generic_loop. 
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -286,10 +286,11 @@ c = v[0] return space.newbool(func(c)) else: - return self._is_generic_loop(space, v, func) + return self._is_generic_loop(space, v, func_name) @specialize.arg(3) - def _is_generic_loop(self, space, v, func): + def _is_generic_loop(self, space, v, func_name): + func = getattr(self, func_name) for idx in range(len(v)): if not func(v[idx]): return space.w_False From noreply at buildbot.pypy.org Wed Jul 24 22:20:47 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 24 Jul 2013 22:20:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix Message-ID: <20130724202047.78F091C02A1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65630:793c108ac10f Date: 2013-07-24 13:20 -0700 http://bitbucket.org/pypy/pypy/changeset/793c108ac10f/ Log: fix diff --git a/pypy/module/cpyext/test/test_structseq.py b/pypy/module/cpyext/test/test_structseq.py --- a/pypy/module/cpyext/test/test_structseq.py +++ b/pypy/module/cpyext/test/test_structseq.py @@ -6,7 +6,7 @@ class AppTestStructSeq(AppTestCpythonExtensionBase): def test_StructSeq(self): - py.test.skip("XXX: https://bugs.pypy.org/issue1557") + skip("XXX: https://bugs.pypy.org/issue1557") module = self.import_extension('foo', prologue=""" #include From noreply at buildbot.pypy.org Wed Jul 24 22:56:56 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 24 Jul 2013 22:56:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k: some debugging for mysteriousness on the buildbot Message-ID: <20130724205656.18DF11C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65631:f1c3de2cb18c Date: 2013-07-24 13:56 -0700 http://bitbucket.org/pypy/pypy/changeset/f1c3de2cb18c/ Log: some debugging for mysteriousness on the buildbot diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -413,7 +413,11 @@ value = space.type(value) result = value.getname(space) else: - result = unicode(value) + try: + result = unicode(value) + except UnicodeDecodeError: + print('_compute_value failed %r' % value) + raise lst[i + i + 1] = result lst[-1] = self.xstrings[-1] return u''.join(lst) From noreply at buildbot.pypy.org Thu Jul 25 00:12:00 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 25 Jul 2013 00:12:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Backed out changeset f1c3de2cb18c Message-ID: <20130724221200.0756F1C0130@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65632:750795ca1fcd Date: 2013-07-24 15:11 -0700 http://bitbucket.org/pypy/pypy/changeset/750795ca1fcd/ Log: Backed out changeset f1c3de2cb18c diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -413,11 +413,7 @@ value = space.type(value) result = value.getname(space) else: - try: - result = unicode(value) - except UnicodeDecodeError: - print('_compute_value failed %r' % value) - raise + result = unicode(value) lst[i + i + 1] = result lst[-1] = self.xstrings[-1] return u''.join(lst) From noreply at buildbot.pypy.org Thu Jul 25 01:38:03 2013 From: noreply at buildbot.pypy.org (rxe) Date: Thu, 25 Jul 2013 01:38:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add myself Message-ID: <20130724233803.103F31C0130@cobra.cs.uni-duesseldorf.de> Author: Richard Emslie Branch: 
extradoc Changeset: r5007:cb7f805695b4 Date: 2013-07-25 00:31 +0100 http://bitbucket.org/pypy/extradoc/changeset/cb7f805695b4/ Log: Add myself diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -16,6 +16,7 @@ Laurence Tratt lives there Edd Barrett ? ? Armin Rigo ? ? +Richard Emslie 25/8-2/9 some hotel ==================== ============== ======================= From noreply at buildbot.pypy.org Thu Jul 25 09:15:04 2013 From: noreply at buildbot.pypy.org (krono) Date: Thu, 25 Jul 2013 09:15:04 +0200 (CEST) Subject: [pypy-commit] pypy dotviewer-linewidth: Add linewidth attribute to dotviewer Message-ID: <20130725071504.BDD201C13FC@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: dotviewer-linewidth Changeset: r65633:ada230ad6f68 Date: 2013-07-24 18:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ada230ad6f68/ Log: Add linewidth attribute to dotviewer note that due to 'plain'-format restrictions, only the older 'style="setlinewidth(...)"' style is supported. diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -22,6 +22,7 @@ 'yellow': (255,255,0), } re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') +re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') def combine(color1, color2, alpha): r1, g1, b1 = color1 @@ -138,6 +139,13 @@ self.yl = float(yl) rest = rest[3:] self.style, self.color = rest + linematch = re_linewidth.match(self.style) + if linematch: + num = linematch.group(1) + self.linewidth = int(round(float(num))) + self.style = self.style[linematch.end(0):] + else: + self.linewidth = 1 self.highlight = False self.cachedbezierpoints = None self.cachedarrowhead = None @@ -520,8 +528,8 @@ fgcolor = highlight_color(fgcolor) points = [self.map(*xy) for xy in edge.bezierpoints()] - def drawedgebody(points=points, fgcolor=fgcolor): - pygame.draw.lines(self.screen, fgcolor, False, points) + def drawedgebody(points=points, fgcolor=fgcolor, width=edge.linewidth): + pygame.draw.lines(self.screen, fgcolor, False, points, width) edgebodycmd.append(drawedgebody) points = [self.map(*xy) for xy in edge.arrowhead()] diff --git a/dotviewer/test/test_interactive.py b/dotviewer/test/test_interactive.py --- a/dotviewer/test/test_interactive.py +++ b/dotviewer/test/test_interactive.py @@ -34,6 +34,23 @@ } ''' +SOURCE2=r'''digraph f { + a; d; e; f; g; h; i; j; k; l; + a -> d [penwidth=1, style="setlinewidth(1)"]; + d -> e [penwidth=2, style="setlinewidth(2)"]; + e -> f [penwidth=4, style="setlinewidth(4)"]; + f -> g [penwidth=8, style="setlinewidth(8)"]; + g -> h [penwidth=16, style="setlinewidth(16)"]; + h -> i [penwidth=32, style="setlinewidth(32)"]; + i -> j [penwidth=64, style="setlinewidth(64)"]; + j -> k [penwidth=128, style="setlinewidth(128)"]; + k -> l [penwidth=256, style="setlinewidth(256)"]; +}''' + + + + + def setup_module(mod): if not option.pygame: py.test.skip("--pygame not enabled") @@ -161,3 +178,10 @@ page = MyPage(str(dotfile)) page.fixedfont = True graphclient.display_page(page) + +def test_linewidth(): + udir.join("graph2.dot").write(SOURCE2) + from dotviewer import graphpage, graphclient + dotfile = udir.join('graph2.dot') + page = graphpage.DotFileGraphPage(str(dotfile)) + graphclient.display_page(page) From noreply at buildbot.pypy.org Thu Jul 25 09:41:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 25 Jul 2013 09:41:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: make 
jitviewer work by ignoring thread numbers in log files Message-ID: <20130725074147.C90921C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65634:88858c31ce32 Date: 2013-07-25 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/88858c31ce32/ Log: make jitviewer work by ignoring thread numbers in log files diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -361,12 +361,19 @@ i += 1 return res - +def purge_thread_numbers(entry): + result = [] + for line in entry.split('\n'): + line = line[line.find('#')+2:] + result.append(line) + return '\n'.join(result) + def import_log(logname, ParserCls=SimpleParser): log = parse_log_file(logname) hex_re = '0x(-?[\da-f]+)' addrs = {} for entry in extract_category(log, 'jit-backend-addr'): + entry = purge_thread_numbers(entry) m = re.search('bootstrap ' + hex_re, entry) if not m: # a bridge @@ -381,6 +388,7 @@ addrs.setdefault(addr, []).append(name) dumps = {} for entry in extract_category(log, 'jit-backend-dump'): + entry = purge_thread_numbers(entry) backend, _, dump, _ = entry.split("\n") _, addr, _, data = re.split(" +", dump) backend_name = backend.split(" ")[1] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1203,15 +1203,13 @@ mc.MOV(ebp, mem(ecx, -WORD)) # if gcrootmap and gcrootmap.is_stm: - - if not hasattr(gc_ll_descr, 'P2Wdescr'): raise Exception("unreachable code") wbdescr = gc_ll_descr.P2Wdescr self._stm_barrier_fastpath(mc, wbdescr, [ebp], is_frame=True, align_stack=align_stack) return - + # wbdescr = gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not From noreply at buildbot.pypy.org Thu Jul 25 11:19:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 11:19:11 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: For interp2app(), pick up the docstring from either the interp-level Message-ID: <20130725091911.6DA571C13FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: refactor-str-types Changeset: r65635:de6e41b763ad Date: 2013-07-25 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/de6e41b763ad/ Log: For interp2app(), pick up the docstring from either the interp-level function (as it was done before) or override it with the "doc" keyword argument. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -708,6 +708,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + class AppTestPyTestMark: @py.test.mark.unlikely_to_exist From noreply at buildbot.pypy.org Thu Jul 25 11:49:13 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 25 Jul 2013 11:49:13 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: merge default Message-ID: <20130725094913.EADFB1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65636:57e65f8dc0fb Date: 2013-07-25 11:05 +0200 http://bitbucket.org/pypy/pypy/changeset/57e65f8dc0fb/ Log: merge default diff too long, truncating to 2000 out of 4031 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -75,6 +76,15 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,60 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. + +Highlights +========== + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. 
_cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,28 @@ .. branch: improve-str2charp Improve the performance of I/O writing up to 15% by using memcpy instead of copying char-by-char in str2charp and get_nonmovingbuffer + +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. + +.. branch: ssl_moving_write_buffer + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. 
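(Editorial aside, not part of the merge diff: a minimal, POSIX-only illustration of the os.statvfs/os.fstatvfs calls mentioned in the whatsnew entries just above. Field meanings follow the statvfs(3) convention; the free-space arithmetic is the usual idiom, not something taken from the branch.)

import os

st = os.statvfs(".")                        # statistics for the filesystem holding the cwd
free_bytes = st.f_bavail * st.f_frsize      # space available to unprivileged users
print("block size:", st.f_bsize)
print("bytes free (non-root):", free_bytes)

fd = os.open(".", os.O_RDONLY)              # fstatvfs takes an open file descriptor
print(os.fstatvfs(fd).f_files)              # total number of inodes
os.close(fd)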
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -371,7 +371,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,20 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +81,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,39 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at 
unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,104 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize + N = 2 ** 128 + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) == sys.maxsize + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) + + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) 
== 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxsize + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") @@ -2760,6 +2832,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py 
+++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -473,7 +473,7 @@ option_ptr = rffi.cast(rffi.INTP, value_ptr) option_ptr[0] = space.int_w(w_option) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) + w_onoff, w_time, w_interval = space.unpackiterable(w_option, 3) option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) option_ptr.c_onoff = space.uint_w(w_onoff) option_ptr.c_keepalivetime = space.uint_w(w_time) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. 
+ libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -74,7 +74,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.str0_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -679,6 +679,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + class TestAbi: def test_abi_tag(self): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -318,7 +318,7 @@ if not base.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape,]) else: - w_fldname, w_flddesc = space.fixedview(w_elem) + w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) if fldname in fields: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,20 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' 
: 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -53,44 +55,46 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -167,9 +171,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 
'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -177,7 +181,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -186,7 +190,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -194,7 +198,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -65,6 +65,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. 
So we call fstat(), letting diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,6 +125,9 @@ self.llbox = llbox def descr_getint(self, space): + if not jit_hooks.box_isint(self.llbox): + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) @@ -182,7 +185,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +240,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +351,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num + assert int_add.offset == 0 self.on_compile_bridge() expected = ('>' % 
repr(self.f.func_code)) @@ -160,6 +164,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) def test_gmtime(self): import time as rctime diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/support.py @@ -0,0 +1,33 @@ +import py + +from pypy.conftest import option +from pypy.interpreter.error import OperationError + +def import_lib_pypy(space, name, skipmsg=None): + """Import a top level module ensuring it's sourced from the lib_pypy + package. + + Raises a pytest Skip on ImportError if a skip message was specified. + """ + if option.runappdirect: + try: + mod = __import__('lib_pypy.' + name) + except ImportError as e: + if skipmsg is not None: + py.test.skip('%s (%s))' % (skipmsg, str(e))) + raise + return getattr(mod, name) + + try: + # app-level import should find it from the right place (we + # assert so afterwards) as long as a builtin module doesn't + # overshadow it + failed = ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" 
% name) + importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + "return %s" % (name, name, failed, name)) + return space.appexec([], importline) + except OperationError as e: + if skipmsg is None or not e.match(space, space.w_ImportError): + raise + py.test.skip('%s (%s))' % (skipmsg, str(e))) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -2,44 +2,51 @@ Extra tests for the pure Python PyPy _collections module (not used in normal PyPy's) """ +from pypy.module.test_lib_pypy.support import import_lib_pypy -from __future__ import absolute_import -from lib_pypy import _collections as collections -import py -class TestDeque: - def setup_method(self, method): - self.n = 10 - self.d = collections.deque(range(self.n)) +class AppTestDeque: + + def setup_class(cls): + space = cls.space + cls.w_collections = import_lib_pypy(space, '_collections') + cls.w_n = space.wrap(10) + + def w_get_deque(self): + return self.collections.deque(range(self.n)) def test_deque(self): - assert len(self.d) == self.n + d = self.get_deque() + assert len(d) == self.n for i in range(self.n): - assert i == self.d[i] + assert i == d[i] for i in range(self.n-1, -1, -1): - assert self.d.pop() == i - assert len(self.d) == 0 + assert d.pop() == i + assert len(d) == 0 def test_deque_iter(self): - it = iter(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = iter(d) + raises(TypeError, len, it) assert it.next() == 0 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def test_deque_reversed(self): - it = reversed(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = reversed(d) + raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def test_deque_remove(self): - d = self.d - py.test.raises(ValueError, d.remove, "foobar") + d = self.get_deque() + raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): + collections = self.collections # Handle evil mutator class MutateCmp: def __init__(self, deque, result): @@ -52,24 +59,33 @@ for match in (True, False): d = collections.deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - py.test.raises(IndexError, d.remove, 'c') + raises(IndexError, d.remove, 'c') assert len(d) == 0 -class TestDequeExtra: +class AppTestDequeExtra: + + spaceconfig = dict(usemodules=('binascii', 'struct',)) + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_remove_empty(self): + collections = self.collections d = collections.deque([]) - py.test.raises(ValueError, d.remove, 1) + raises(ValueError, d.remove, 1) def test_remove_mutating(self): + collections = self.collections class MutatingCmp(object): def __eq__(self, other): d.clear() return True d = collections.deque([MutatingCmp()]) - py.test.raises(IndexError, d.remove, 1) + raises(IndexError, d.remove, 1) def test_remove_failing(self): + collections = self.collections class FailingCmp(object): def __eq__(self, other): assert False @@ -77,10 +93,11 @@ f = FailingCmp() d = collections.deque([1, 2, 3, f, 4, 5]) d.remove(3) - py.test.raises(AssertionError, d.remove, 4) + raises(AssertionError, d.remove, 4) assert d == collections.deque([1, 2, f, 4, 5]) def test_maxlen(self): + collections = 
self.collections d = collections.deque([], 3) d.append(1); d.append(2); d.append(3); d.append(4) assert list(d) == [2, 3, 4] @@ -95,11 +112,13 @@ assert repr(d3) == "deque([2, 3, 4], maxlen=3)" def test_count(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) assert d.count(2) == 3 assert d.count(4) == 0 def test_reverse(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) d.reverse() assert list(d) == [2, 3, 2, 2, 1] @@ -109,6 +128,7 @@ assert list(d) == range(99, -1, -1) def test_subclass_with_kwargs(self): + collections = self.collections class SubclassWithKwargs(collections.deque): def __init__(self, newarg=1): collections.deque.__init__(self) @@ -116,11 +136,13 @@ # SF bug #1486663 -- this used to erroneously raise a TypeError SubclassWithKwargs(newarg=1) -def foobar(): - return list +class AppTestDefaultDict: -class TestDefaultDict: + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_basic(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory is None d1.default_factory = list @@ -148,20 +170,23 @@ assert 12 not in d2.keys() d2.default_factory = None assert d2.default_factory == None - py.test.raises(KeyError, d2.__getitem__, 15) - py.test.raises(TypeError, collections.defaultdict, 1) + raises(KeyError, d2.__getitem__, 15) + raises(TypeError, collections.defaultdict, 1) def test_constructor(self): + collections = self.collections assert collections.defaultdict(None) == {} assert collections.defaultdict(None, {1: 2}) == {1: 2} def test_missing(self): + collections = self.collections d1 = collections.defaultdict() - py.test.raises(KeyError, d1.__missing__, 42) + raises(KeyError, d1.__missing__, 42) d1.default_factory = list assert d1.__missing__(42) == [] def test_repr(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory == None assert repr(d1) == "defaultdict(None, {})" @@ -181,6 +206,7 @@ assert repr(d4) == "defaultdict(%s, {14: defaultdict(None, {})})" % repr(int) def test_recursive_repr(self): + collections = self.collections # Issue2045: stack overflow when default_factory is a bound method class sub(collections.defaultdict): def __init__(self): @@ -192,6 +218,7 @@ "defaultdict( Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65637:a040b8053e49 Date: 2013-07-25 11:17 +0200 http://bitbucket.org/pypy/pypy/changeset/a040b8053e49/ Log: fix this test diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2280,9 +2280,9 @@ ops = ''' [i0, i1, i2, i3, i4, i5, i6, f0, f1] - cond_call(i1, ConstClass(func_ptr), %s, descr=calldescr) + cond_call(i1, ConstClass(func_ptr), %s) guard_false(i0, descr=faildescr) [i1, i2, i3, i4, i5, i6, f0, f1] - ''' % ', '.join(['i%d' % (j + 2) for j in range(i)]) + ''' % ', '.join(['i%d' % (j + 2) for j in range(i)] + ["descr=calldescr"]) loop = parse(ops, namespace={'faildescr': BasicFailDescr(), 'func_ptr': func_ptr, 'calldescr': calldescr}) From noreply at buildbot.pypy.org Thu Jul 25 11:49:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 25 Jul 2013 11:49:16 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: we changed the way registers are picked. 
Message-ID: <20130725094916.A71921C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65638:1def2b709c14 Date: 2013-07-25 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/1def2b709c14/ Log: we changed the way registers are picked. diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -84,7 +84,7 @@ nos.reverse() if self.cpu.backend_name.startswith('x86'): if self.cpu.IS_64_BIT: - assert nos == [11, 12, 31] + assert nos == [0, 1, 31] else: assert nos == [4, 5, 25] elif self.cpu.backend_name.startswith('arm'): From noreply at buildbot.pypy.org Thu Jul 25 11:49:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 25 Jul 2013 11:49:17 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: ups leftovers Message-ID: <20130725094917.E643A1C0149@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65639:fe4416f455c8 Date: 2013-07-25 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/fe4416f455c8/ Log: ups leftovers diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -89,23 +89,6 @@ tzdescr = None # noone cares # - ARRAY = lltype.GcArray(lltype.Signed) - LIST = lltype.GcStruct('LIST', ('length', lltype.Signed), - ('items', lltype.Ptr(ARRAY))) - lendescr = get_field_descr(self.gc_ll_descr, LIST, 'length') - itemsdescr = get_field_descr(self.gc_ll_descr, LIST, 'items') - arraydescr = get_array_descr(self.gc_ll_descr, ARRAY) - resize_ptr = ConstInt(123) - extrainfo = EffectInfo(None, None, None, None, - extraeffect=EffectInfo.EF_RANDOM_EFFECTS, - oopspecindex=EffectInfo.OS_LIST_RESIZE_GE, - extra_descrs=[lendescr, itemsdescr, arraydescr, - resize_ptr]) - list_resize_descr = get_call_descr(self.gc_ll_descr, - [lltype.Ptr(LIST), lltype.Signed], - lltype.Void, extrainfo) - extrainfo.extra_descrs.append(list_resize_descr) - namespace.update(locals()) # for funcname in self.gc_ll_descr._generated_functions: From noreply at buildbot.pypy.org Thu Jul 25 12:02:07 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 25 Jul 2013 12:02:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: make jitviewer show rewritten trace Message-ID: <20130725100207.6CFF01C01E5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65640:cea9abd542dc Date: 2013-07-25 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/cea9abd542dc/ Log: make jitviewer show rewritten trace diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -452,6 +452,7 @@ mapping[loop.descr] = loop for line in lines: if line: + line = purge_thread_numbers(line) num, count = line.split(':', 2) mapping[num].count = int(count) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -517,9 +517,9 @@ clt.allgcrefs = [] clt.frame_info.clear() # for now - # if log: - # operations = self._inject_debugging_code(looptoken, operations, - # 'e', looptoken.number) + if log: + operations = self._inject_debugging_code(looptoken, operations, 
+ 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -568,7 +568,7 @@ self.cpu.profile_agent.native_code_written(name, rawstart, full_size) return AsmInfo(ops_offset, rawstart + looppos, - size_excluding_failure_stuff - looppos) + size_excluding_failure_stuff - looppos), operations def assemble_bridge(self, faildescr, inputargs, operations, original_loop_token, log, logger=None): @@ -578,9 +578,9 @@ self.setup(original_loop_token) descr_number = compute_unique_id(faildescr) - # if log: - # operations = self._inject_debugging_code(faildescr, operations, - # 'b', descr_number) + if log: + operations = self._inject_debugging_code(faildescr, operations, + 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) @@ -615,7 +615,9 @@ name = "Bridge # %s" % (descr_number,) self.cpu.profile_agent.native_code_written(name, rawstart, fullsize) - return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) + return AsmInfo(ops_offset, startpos + rawstart, + codeendpos - startpos), operations + def write_pending_failure_recoveries(self): # for each pending guard, generate the code of the recovery stub diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -343,9 +343,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_loop(metainterp_sd, loop.inputargs, - operations, original_jitcell_token, - name=loopname) + asminfo, new_ops = do_compile_loop(metainterp_sd, loop.inputargs, + operations, original_jitcell_token, + name=loopname) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -361,7 +361,7 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + metainterp_sd.logger_ops.log_loop(loop.inputargs, new_ops, n, type, ops_offset, name=loopname) # @@ -387,9 +387,9 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, - operations, - original_loop_token) + asminfo, new_ops = do_compile_bridge(metainterp_sd, faildescr, + inputargs, operations, + original_loop_token) finally: debug_stop("jit-backend") metainterp_sd.profiler.end_backend() @@ -404,7 +404,7 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_bridge(inputargs, operations, None, faildescr, + metainterp_sd.logger_ops.log_bridge(inputargs, new_ops, None, faildescr, ops_offset) # #if metainterp_sd.warmrunnerdesc is not None: # for tests From noreply at buildbot.pypy.org Thu Jul 25 12:36:35 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 25 Jul 2013 12:36:35 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: added statvfs_result to posix Message-ID: <20130725103635.626A71C101E@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: release-2.1.x Changeset: r65641:fde1203a425d Date: 2013-07-19 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/fde1203a425d/ Log: added statvfs_result to posix (transplanted from b95b5d21340327350a770cf5f4c0f5b2fc4ebf09) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -35,6 +35,7 @@ appleveldefs = { 'error' : 'app_posix.error', 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 
'app_posix.statvfs_result', 'fdopen' : 'app_posix.fdopen', 'tmpfile' : 'app_posix.tmpfile', 'popen' : 'app_posix.popen', diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -65,6 +65,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. So we call fstat(), letting From noreply at buildbot.pypy.org Thu Jul 25 12:43:47 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 25 Jul 2013 12:43:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: let jitviewer show symbol names for addresses (like viewcode.py) Message-ID: <20130725104347.9FBBA1C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65642:71e70dc573d8 Date: 2013-07-25 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/71e70dc573d8/ Log: let jitviewer show symbol names for addresses (like viewcode.py) diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -4,6 +4,8 @@ from rpython.jit.tool.oparser import OpParser from rpython.tool.logparser import parse_log_file, extract_category from copy import copy +from rpython.jit.backend.tool.viewcode import (machine_code_dump, load_symbols, + lineaddresses) def parse_code_data(arg): name = None @@ -74,7 +76,7 @@ use_mock_model = True def postprocess(self, loop, backend_dump=None, backend_tp=None, - dump_start=0): + dump_start=0, symbols=None): if backend_dump is not None: raw_asm = self._asm_disassemble(backend_dump.decode('hex'), backend_tp, dump_start) @@ -89,8 +91,15 @@ if not start: start = int(adr.strip(":"), 16) ofs = int(adr.strip(":"), 16) - start + # add symbols to addresses: + for addr in lineaddresses(v): + sym = symbols.get(addr) + if sym: + v = '%s\t%s\n' % (v.rstrip(), + sym.replace('\xb7', '')) if ofs >= 0: asm.append((ofs, v.strip("\n"))) + # asm_index = 0 for i, op in enumerate(loop.operations): end = 0 @@ -113,7 +122,6 @@ return loop def _asm_disassemble(self, d, origin_addr, tp): - from rpython.jit.backend.tool.viewcode import machine_code_dump return list(machine_code_dump(d, tp, origin_addr)) @classmethod @@ -387,15 +395,22 @@ addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) dumps = {} + executables = set() + symbols = {} for entry in extract_category(log, 'jit-backend-dump'): entry = purge_thread_numbers(entry) - backend, _, dump, _ = entry.split("\n") + backend, executable, dump, _ = entry.split("\n") + _, executable = executable.split(" ") + if executable not in executables: + symbols.update(load_symbols(executable)) + executables.add(executable) _, addr, _, data = re.split(" +", dump) backend_name = backend.split(" ")[1] addr = int(addr[1:], 16) if addr in addrs and addrs[addr]: name = addrs[addr].pop(0) # they should come in order dumps[name] = (backend_name, addr, data) + loops = [] for entry in extract_category(log, 'jit-log-opt'): parser = ParserCls(entry, None, {}, 
'lltype', None, @@ -416,7 +431,8 @@ bname=bname, loop=loop: parser.postprocess(loop, backend_tp=bname, backend_dump=dump, - dump_start=start_ofs)) + dump_start=start_ofs, + symbols=symbols)) loops += split_trace(loop) return log, loops diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -205,7 +205,8 @@ for addr in lineaddresses(line): sym = symbols.get(addr) if sym: - lines[i] = '%s\t%s\n' % (lines[i].rstrip(), sym) + lines[i] = '%s\t%s\n' % (lines[i].rstrip(), + str(sym).strip('\xb7')) self.text = ''.join(lines) return self.text From noreply at buildbot.pypy.org Thu Jul 25 12:49:36 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 25 Jul 2013 12:49:36 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add me & friend to london sprint Message-ID: <20130725104936.2230E1C101E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5008:2be03f146718 Date: 2013-07-25 12:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/2be03f146718/ Log: add me & friend to london sprint diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -17,6 +17,8 @@ Edd Barrett ? ? Armin Rigo ? ? Richard Emslie 25/8-2/9 some hotel +Remi Meier 24/8-1/9 ? +Marko Bencun 24/8-1/9 ? ==================== ============== ======================= From noreply at buildbot.pypy.org Thu Jul 25 13:43:41 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 13:43:41 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove oosend support from translator.backendopt Message-ID: <20130725114341.8F0E21C101E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65643:5168fe5f82b9 Date: 2013-07-25 13:42 +0200 http://bitbucket.org/pypy/pypy/changeset/5168fe5f82b9/ Log: Remove oosend support from translator.backendopt diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -20,10 +20,6 @@ fnobj = op.args[0].value._obj return getattr(fnobj, 'canraise', True) - def analyze_external_method(self, op, TYPE, meth): - assert op.opname == 'oosend' - return getattr(meth, '_can_raise', True) - def analyze_exceptblock(self, block, seen=None): return True diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -63,9 +63,6 @@ result, self.analyze_direct_call(graph, seen)) return result - def analyze_external_method(self, op, TYPE, meth): - return self.top_result() - def analyze_link(self, graph, link): return self.bottom_result() @@ -96,14 +93,6 @@ if self.verbose and x: self.dump_info('analyze_indirect_call(%s): %r' % (graphs, x)) return x - elif op.opname == "oosend": - name = op.args[0].value - TYPE = op.args[1].concretetype - _, meth = TYPE._lookup(name) - graph = getattr(meth, 'graph', None) - if graph is None: - return self.analyze_external_method(op, TYPE, meth) - return self.analyze_oosend(TYPE, name, seen) x = self.analyze_simple_operation(op, graphinfo) if self.verbose and x: self.dump_info('%s: %r' % (op, x)) From noreply at buildbot.pypy.org Thu Jul 25 15:30:11 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 
2013 15:30:11 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Try to use interpindirect2app. Message-ID: <20130725133011.C9B971C146E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65644:a2881bd0264c Date: 2013-07-25 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/a2881bd0264c/ Log: Try to use interpindirect2app. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature from pypy.objspace.std import bytesobject from pypy.objspace.std.intobject import W_IntObject @@ -427,58 +427,58 @@ __repr__ = interp2app(W_BytearrayObject.descr_repr), __str__ = interp2app(W_BytearrayObject.descr_str), - __eq__ = interp2app(W_BytearrayObject.descr_eq), - __ne__ = interp2app(W_BytearrayObject.descr_ne), - __lt__ = interp2app(W_BytearrayObject.descr_lt), - __le__ = interp2app(W_BytearrayObject.descr_le), - __gt__ = interp2app(W_BytearrayObject.descr_gt), - __ge__ = interp2app(W_BytearrayObject.descr_ge), + __eq__ = interpindirect2app(W_BytearrayObject.descr_eq), + __ne__ = interpindirect2app(W_BytearrayObject.descr_ne), + __lt__ = interpindirect2app(W_BytearrayObject.descr_lt), + __le__ = interpindirect2app(W_BytearrayObject.descr_le), + __gt__ = interpindirect2app(W_BytearrayObject.descr_gt), + __ge__ = interpindirect2app(W_BytearrayObject.descr_ge), - __len__ = interp2app(W_BytearrayObject.descr_len), - __contains__ = interp2app(W_BytearrayObject.descr_contains), + __len__ = interpindirect2app(W_BytearrayObject.descr_len), + __contains__ = interpindirect2app(W_BytearrayObject.descr_contains), - __add__ = interp2app(W_BytearrayObject.descr_add), - __mul__ = interp2app(W_BytearrayObject.descr_mul), - __rmul__ = interp2app(W_BytearrayObject.descr_mul), + __add__ = interpindirect2app(W_BytearrayObject.descr_add), + __mul__ = interpindirect2app(W_BytearrayObject.descr_mul), + __rmul__ = interpindirect2app(W_BytearrayObject.descr_mul), - __getitem__ = interp2app(W_BytearrayObject.descr_getitem), + __getitem__ = interpindirect2app(W_BytearrayObject.descr_getitem), - capitalize = interp2app(W_BytearrayObject.descr_capitalize), - center = interp2app(W_BytearrayObject.descr_center), - count = interp2app(W_BytearrayObject.descr_count), - decode = interp2app(W_BytearrayObject.descr_decode), - expandtabs = interp2app(W_BytearrayObject.descr_expandtabs), - find = interp2app(W_BytearrayObject.descr_find), - rfind = interp2app(W_BytearrayObject.descr_rfind), - index = interp2app(W_BytearrayObject.descr_index), - rindex = interp2app(W_BytearrayObject.descr_rindex), - isalnum = interp2app(W_BytearrayObject.descr_isalnum), - isalpha = interp2app(W_BytearrayObject.descr_isalpha), - isdigit = interp2app(W_BytearrayObject.descr_isdigit), - islower = interp2app(W_BytearrayObject.descr_islower), - isspace = interp2app(W_BytearrayObject.descr_isspace), - istitle = interp2app(W_BytearrayObject.descr_istitle), - isupper = interp2app(W_BytearrayObject.descr_isupper), - join = interp2app(W_BytearrayObject.descr_join), - ljust = 
interp2app(W_BytearrayObject.descr_ljust), - rjust = interp2app(W_BytearrayObject.descr_rjust), - lower = interp2app(W_BytearrayObject.descr_lower), - partition = interp2app(W_BytearrayObject.descr_partition), - rpartition = interp2app(W_BytearrayObject.descr_rpartition), - replace = interp2app(W_BytearrayObject.descr_replace), - split = interp2app(W_BytearrayObject.descr_split), - rsplit = interp2app(W_BytearrayObject.descr_rsplit), - splitlines = interp2app(W_BytearrayObject.descr_splitlines), - startswith = interp2app(W_BytearrayObject.descr_startswith), - endswith = interp2app(W_BytearrayObject.descr_endswith), - strip = interp2app(W_BytearrayObject.descr_strip), - lstrip = interp2app(W_BytearrayObject.descr_lstrip), - rstrip = interp2app(W_BytearrayObject.descr_rstrip), - swapcase = interp2app(W_BytearrayObject.descr_swapcase), - title = interp2app(W_BytearrayObject.descr_title), - translate = interp2app(W_BytearrayObject.descr_translate), - upper = interp2app(W_BytearrayObject.descr_upper), - zfill = interp2app(W_BytearrayObject.descr_zfill), + capitalize = interpindirect2app(W_BytearrayObject.descr_capitalize), + center = interpindirect2app(W_BytearrayObject.descr_center), + count = interpindirect2app(W_BytearrayObject.descr_count), + decode = interpindirect2app(W_BytearrayObject.descr_decode), + expandtabs = interpindirect2app(W_BytearrayObject.descr_expandtabs), + find = interpindirect2app(W_BytearrayObject.descr_find), + rfind = interpindirect2app(W_BytearrayObject.descr_rfind), + index = interpindirect2app(W_BytearrayObject.descr_index), + rindex = interpindirect2app(W_BytearrayObject.descr_rindex), + isalnum = interpindirect2app(W_BytearrayObject.descr_isalnum), + isalpha = interpindirect2app(W_BytearrayObject.descr_isalpha), + isdigit = interpindirect2app(W_BytearrayObject.descr_isdigit), + islower = interpindirect2app(W_BytearrayObject.descr_islower), + isspace = interpindirect2app(W_BytearrayObject.descr_isspace), + istitle = interpindirect2app(W_BytearrayObject.descr_istitle), + isupper = interpindirect2app(W_BytearrayObject.descr_isupper), + join = interpindirect2app(W_BytearrayObject.descr_join), + ljust = interpindirect2app(W_BytearrayObject.descr_ljust), + rjust = interpindirect2app(W_BytearrayObject.descr_rjust), + lower = interpindirect2app(W_BytearrayObject.descr_lower), + partition = interpindirect2app(W_BytearrayObject.descr_partition), + rpartition = interpindirect2app(W_BytearrayObject.descr_rpartition), + replace = interpindirect2app(W_BytearrayObject.descr_replace), + split = interpindirect2app(W_BytearrayObject.descr_split), + rsplit = interpindirect2app(W_BytearrayObject.descr_rsplit), + splitlines = interpindirect2app(W_BytearrayObject.descr_splitlines), + startswith = interpindirect2app(W_BytearrayObject.descr_startswith), + endswith = interpindirect2app(W_BytearrayObject.descr_endswith), + strip = interpindirect2app(W_BytearrayObject.descr_strip), + lstrip = interpindirect2app(W_BytearrayObject.descr_lstrip), + rstrip = interpindirect2app(W_BytearrayObject.descr_rstrip), + swapcase = interpindirect2app(W_BytearrayObject.descr_swapcase), + title = interpindirect2app(W_BytearrayObject.descr_title), + translate = interpindirect2app(W_BytearrayObject.descr_translate), + upper = interpindirect2app(W_BytearrayObject.descr_upper), + zfill = interpindirect2app(W_BytearrayObject.descr_zfill), __init__ = interp2app(W_BytearrayObject.descr_init), __buffer__ = interp2app(W_BytearrayObject.descr_buffer), diff --git a/pypy/objspace/std/bytesobject.py 
b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -2,7 +2,7 @@ from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format @@ -262,60 +262,60 @@ __str__ = interp2app(W_BytesObject.descr_str), __hash__ = interp2app(W_BytesObject.descr_hash), - __eq__ = interp2app(W_BytesObject.descr_eq), - __ne__ = interp2app(W_BytesObject.descr_ne), - __lt__ = interp2app(W_BytesObject.descr_lt), - __le__ = interp2app(W_BytesObject.descr_le), - __gt__ = interp2app(W_BytesObject.descr_gt), - __ge__ = interp2app(W_BytesObject.descr_ge), + __eq__ = interpindirect2app(W_BytesObject.descr_eq), + __ne__ = interpindirect2app(W_BytesObject.descr_ne), + __lt__ = interpindirect2app(W_BytesObject.descr_lt), + __le__ = interpindirect2app(W_BytesObject.descr_le), + __gt__ = interpindirect2app(W_BytesObject.descr_gt), + __ge__ = interpindirect2app(W_BytesObject.descr_ge), - __len__ = interp2app(W_BytesObject.descr_len), - __contains__ = interp2app(W_BytesObject.descr_contains), + __len__ = interpindirect2app(W_BytesObject.descr_len), + __contains__ = interpindirect2app(W_BytesObject.descr_contains), - __add__ = interp2app(W_BytesObject.descr_add), - __mul__ = interp2app(W_BytesObject.descr_mul), - __rmul__ = interp2app(W_BytesObject.descr_mul), + __add__ = interpindirect2app(W_BytesObject.descr_add), + __mul__ = interpindirect2app(W_BytesObject.descr_mul), + __rmul__ = interpindirect2app(W_BytesObject.descr_mul), - __getitem__ = interp2app(W_BytesObject.descr_getitem), - __getslice__ = interp2app(W_BytesObject.descr_getslice), + __getitem__ = interpindirect2app(W_BytesObject.descr_getitem), + __getslice__ = interpindirect2app(W_BytesObject.descr_getslice), - capitalize = interp2app(W_BytesObject.descr_capitalize), - center = interp2app(W_BytesObject.descr_center), - count = interp2app(W_BytesObject.descr_count), - decode = interp2app(W_BytesObject.descr_decode), - encode = interp2app(W_BytesObject.descr_encode), - expandtabs = interp2app(W_BytesObject.descr_expandtabs), - find = interp2app(W_BytesObject.descr_find), - rfind = interp2app(W_BytesObject.descr_rfind), - index = interp2app(W_BytesObject.descr_index), - rindex = interp2app(W_BytesObject.descr_rindex), - isalnum = interp2app(W_BytesObject.descr_isalnum), - isalpha = interp2app(W_BytesObject.descr_isalpha), - isdigit = interp2app(W_BytesObject.descr_isdigit), - islower = interp2app(W_BytesObject.descr_islower), - isspace = interp2app(W_BytesObject.descr_isspace), - istitle = interp2app(W_BytesObject.descr_istitle), - isupper = interp2app(W_BytesObject.descr_isupper), - join = interp2app(W_BytesObject.descr_join), - ljust = interp2app(W_BytesObject.descr_ljust), - rjust = interp2app(W_BytesObject.descr_rjust), - lower = interp2app(W_BytesObject.descr_lower), - partition = interp2app(W_BytesObject.descr_partition), - rpartition = interp2app(W_BytesObject.descr_rpartition), - replace = interp2app(W_BytesObject.descr_replace), - split = interp2app(W_BytesObject.descr_split), - rsplit = interp2app(W_BytesObject.descr_rsplit), - splitlines = interp2app(W_BytesObject.descr_splitlines), - startswith = 
interp2app(W_BytesObject.descr_startswith), - endswith = interp2app(W_BytesObject.descr_endswith), - strip = interp2app(W_BytesObject.descr_strip), - lstrip = interp2app(W_BytesObject.descr_lstrip), - rstrip = interp2app(W_BytesObject.descr_rstrip), - swapcase = interp2app(W_BytesObject.descr_swapcase), - title = interp2app(W_BytesObject.descr_title), - translate = interp2app(W_BytesObject.descr_translate), - upper = interp2app(W_BytesObject.descr_upper), - zfill = interp2app(W_BytesObject.descr_zfill), + capitalize = interpindirect2app(W_BytesObject.descr_capitalize), + center = interpindirect2app(W_BytesObject.descr_center), + count = interpindirect2app(W_BytesObject.descr_count), + decode = interpindirect2app(W_BytesObject.descr_decode), + encode = interpindirect2app(W_BytesObject.descr_encode), + expandtabs = interpindirect2app(W_BytesObject.descr_expandtabs), + find = interpindirect2app(W_BytesObject.descr_find), + rfind = interpindirect2app(W_BytesObject.descr_rfind), + index = interpindirect2app(W_BytesObject.descr_index), + rindex = interpindirect2app(W_BytesObject.descr_rindex), + isalnum = interpindirect2app(W_BytesObject.descr_isalnum), + isalpha = interpindirect2app(W_BytesObject.descr_isalpha), + isdigit = interpindirect2app(W_BytesObject.descr_isdigit), + islower = interpindirect2app(W_BytesObject.descr_islower), + isspace = interpindirect2app(W_BytesObject.descr_isspace), + istitle = interpindirect2app(W_BytesObject.descr_istitle), + isupper = interpindirect2app(W_BytesObject.descr_isupper), + join = interpindirect2app(W_BytesObject.descr_join), + ljust = interpindirect2app(W_BytesObject.descr_ljust), + rjust = interpindirect2app(W_BytesObject.descr_rjust), + lower = interpindirect2app(W_BytesObject.descr_lower), + partition = interpindirect2app(W_BytesObject.descr_partition), + rpartition = interpindirect2app(W_BytesObject.descr_rpartition), + replace = interpindirect2app(W_BytesObject.descr_replace), + split = interpindirect2app(W_BytesObject.descr_split), + rsplit = interpindirect2app(W_BytesObject.descr_rsplit), + splitlines = interpindirect2app(W_BytesObject.descr_splitlines), + startswith = interpindirect2app(W_BytesObject.descr_startswith), + endswith = interpindirect2app(W_BytesObject.descr_endswith), + strip = interpindirect2app(W_BytesObject.descr_strip), + lstrip = interpindirect2app(W_BytesObject.descr_lstrip), + rstrip = interpindirect2app(W_BytesObject.descr_rstrip), + swapcase = interpindirect2app(W_BytesObject.descr_swapcase), + title = interpindirect2app(W_BytesObject.descr_title), + translate = interpindirect2app(W_BytesObject.descr_translate), + upper = interpindirect2app(W_BytesObject.descr_upper), + zfill = interpindirect2app(W_BytesObject.descr_zfill), format = interp2app(W_BytesObject.descr_format), __format__ = interp2app(W_BytesObject.descr__format__), diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -2,7 +2,7 @@ from pypy.interpreter import unicodehelper from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef @@ -403,60 +403,60 @@ __str__ = interp2app(W_UnicodeObject.descr_str), __hash__ = 
interp2app(W_UnicodeObject.descr_hash), - __eq__ = interp2app(W_UnicodeObject.descr_eq), - __ne__ = interp2app(W_UnicodeObject.descr_ne), - __lt__ = interp2app(W_UnicodeObject.descr_lt), - __le__ = interp2app(W_UnicodeObject.descr_le), - __gt__ = interp2app(W_UnicodeObject.descr_gt), - __ge__ = interp2app(W_UnicodeObject.descr_ge), + __eq__ = interpindirect2app(W_UnicodeObject.descr_eq), + __ne__ = interpindirect2app(W_UnicodeObject.descr_ne), + __lt__ = interpindirect2app(W_UnicodeObject.descr_lt), + __le__ = interpindirect2app(W_UnicodeObject.descr_le), + __gt__ = interpindirect2app(W_UnicodeObject.descr_gt), + __ge__ = interpindirect2app(W_UnicodeObject.descr_ge), - __len__ = interp2app(W_UnicodeObject.descr_len), - __contains__ = interp2app(W_UnicodeObject.descr_contains), + __len__ = interpindirect2app(W_UnicodeObject.descr_len), + __contains__ = interpindirect2app(W_UnicodeObject.descr_contains), - __add__ = interp2app(W_UnicodeObject.descr_add), - __mul__ = interp2app(W_UnicodeObject.descr_mul), - __rmul__ = interp2app(W_UnicodeObject.descr_mul), + __add__ = interpindirect2app(W_UnicodeObject.descr_add), + __mul__ = interpindirect2app(W_UnicodeObject.descr_mul), + __rmul__ = interpindirect2app(W_UnicodeObject.descr_mul), - __getitem__ = interp2app(W_UnicodeObject.descr_getitem), - __getslice__ = interp2app(W_UnicodeObject.descr_getslice), + __getitem__ = interpindirect2app(W_UnicodeObject.descr_getitem), + __getslice__ = interpindirect2app(W_UnicodeObject.descr_getslice), - capitalize = interp2app(W_UnicodeObject.descr_capitalize), - center = interp2app(W_UnicodeObject.descr_center), - count = interp2app(W_UnicodeObject.descr_count), - decode = interp2app(W_UnicodeObject.descr_decode), - encode = interp2app(W_UnicodeObject.descr_encode), - expandtabs = interp2app(W_UnicodeObject.descr_expandtabs), - find = interp2app(W_UnicodeObject.descr_find), - rfind = interp2app(W_UnicodeObject.descr_rfind), - index = interp2app(W_UnicodeObject.descr_index), - rindex = interp2app(W_UnicodeObject.descr_rindex), - isalnum = interp2app(W_UnicodeObject.descr_isalnum), - isalpha = interp2app(W_UnicodeObject.descr_isalpha), - isdigit = interp2app(W_UnicodeObject.descr_isdigit), - islower = interp2app(W_UnicodeObject.descr_islower), - isspace = interp2app(W_UnicodeObject.descr_isspace), - istitle = interp2app(W_UnicodeObject.descr_istitle), - isupper = interp2app(W_UnicodeObject.descr_isupper), - join = interp2app(W_UnicodeObject.descr_join), - ljust = interp2app(W_UnicodeObject.descr_ljust), - rjust = interp2app(W_UnicodeObject.descr_rjust), - lower = interp2app(W_UnicodeObject.descr_lower), - partition = interp2app(W_UnicodeObject.descr_partition), - rpartition = interp2app(W_UnicodeObject.descr_rpartition), - replace = interp2app(W_UnicodeObject.descr_replace), - split = interp2app(W_UnicodeObject.descr_split), - rsplit = interp2app(W_UnicodeObject.descr_rsplit), - splitlines = interp2app(W_UnicodeObject.descr_splitlines), - startswith = interp2app(W_UnicodeObject.descr_startswith), - endswith = interp2app(W_UnicodeObject.descr_endswith), - strip = interp2app(W_UnicodeObject.descr_strip), - lstrip = interp2app(W_UnicodeObject.descr_lstrip), - rstrip = interp2app(W_UnicodeObject.descr_rstrip), - swapcase = interp2app(W_UnicodeObject.descr_swapcase), - title = interp2app(W_UnicodeObject.descr_title), - translate = interp2app(W_UnicodeObject.descr_translate), - upper = interp2app(W_UnicodeObject.descr_upper), - zfill = interp2app(W_UnicodeObject.descr_zfill), + capitalize = 
interpindirect2app(W_UnicodeObject.descr_capitalize), + center = interpindirect2app(W_UnicodeObject.descr_center), + count = interpindirect2app(W_UnicodeObject.descr_count), + decode = interpindirect2app(W_UnicodeObject.descr_decode), + encode = interpindirect2app(W_UnicodeObject.descr_encode), + expandtabs = interpindirect2app(W_UnicodeObject.descr_expandtabs), + find = interpindirect2app(W_UnicodeObject.descr_find), + rfind = interpindirect2app(W_UnicodeObject.descr_rfind), + index = interpindirect2app(W_UnicodeObject.descr_index), + rindex = interpindirect2app(W_UnicodeObject.descr_rindex), + isalnum = interpindirect2app(W_UnicodeObject.descr_isalnum), + isalpha = interpindirect2app(W_UnicodeObject.descr_isalpha), + isdigit = interpindirect2app(W_UnicodeObject.descr_isdigit), + islower = interpindirect2app(W_UnicodeObject.descr_islower), + isspace = interpindirect2app(W_UnicodeObject.descr_isspace), + istitle = interpindirect2app(W_UnicodeObject.descr_istitle), + isupper = interpindirect2app(W_UnicodeObject.descr_isupper), + join = interpindirect2app(W_UnicodeObject.descr_join), + ljust = interpindirect2app(W_UnicodeObject.descr_ljust), + rjust = interpindirect2app(W_UnicodeObject.descr_rjust), + lower = interpindirect2app(W_UnicodeObject.descr_lower), + partition = interpindirect2app(W_UnicodeObject.descr_partition), + rpartition = interpindirect2app(W_UnicodeObject.descr_rpartition), + replace = interpindirect2app(W_UnicodeObject.descr_replace), + split = interpindirect2app(W_UnicodeObject.descr_split), + rsplit = interpindirect2app(W_UnicodeObject.descr_rsplit), + splitlines = interpindirect2app(W_UnicodeObject.descr_splitlines), + startswith = interpindirect2app(W_UnicodeObject.descr_startswith), + endswith = interpindirect2app(W_UnicodeObject.descr_endswith), + strip = interpindirect2app(W_UnicodeObject.descr_strip), + lstrip = interpindirect2app(W_UnicodeObject.descr_lstrip), + rstrip = interpindirect2app(W_UnicodeObject.descr_rstrip), + swapcase = interpindirect2app(W_UnicodeObject.descr_swapcase), + title = interpindirect2app(W_UnicodeObject.descr_title), + translate = interpindirect2app(W_UnicodeObject.descr_translate), + upper = interpindirect2app(W_UnicodeObject.descr_upper), + zfill = interpindirect2app(W_UnicodeObject.descr_zfill), format = interp2app(W_UnicodeObject.descr_format), __format__ = interp2app(W_UnicodeObject.descr__format__), From noreply at buildbot.pypy.org Thu Jul 25 15:56:05 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 25 Jul 2013 15:56:05 +0200 (CEST) Subject: [pypy-commit] pypy default: update relase notes for 2.1 beta2 Message-ID: <20130725135605.706281C101E@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65645:46dbb7a9edb2 Date: 2013-07-25 15:43 +0200 http://bitbucket.org/pypy/pypy/changeset/46dbb7a9edb2/ Log: update relase notes for 2.1 beta2 diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -3,14 +3,16 @@ =============== We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. -This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. Highlights ========== +* Support for os.statvfs and os.fstatvfs on unix systems. + * Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). 
-* Fixed issue `1552`_: GreenletExit should inherit from BaseException +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. * Fixed issue `1537`_: numpypy __array_interface__ From noreply at buildbot.pypy.org Thu Jul 25 15:56:06 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 25 Jul 2013 15:56:06 +0200 (CEST) Subject: [pypy-commit] pypy default: skip test_statvfs if statvfs is not available Message-ID: <20130725135606.B65191C101E@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65646:e02836bd3823 Date: 2013-07-25 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/e02836bd3823/ Log: skip test_statvfs if statvfs is not available diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -184,14 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 - def test_statvfs(self): - st = self.posix.statvfs(".") - assert isinstance(st, self.posix.statvfs_result) - for field in [ - 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', - 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', - ]: - assert hasattr(st, field) + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) def test_pickle(self): import pickle, os From noreply at buildbot.pypy.org Thu Jul 25 16:36:24 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 25 Jul 2013 16:36:24 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: skip test_statvfs if statvfs is not available Message-ID: <20130725143624.D1EAD1C142B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65647:06f936c49498 Date: 2013-07-25 15:55 +0200 http://bitbucket.org/pypy/pypy/changeset/06f936c49498/ Log: skip test_statvfs if statvfs is not available (transplanted from e02836bd3823d5c3db5b2dea56fba55f8e15094f) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -184,14 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 - def test_statvfs(self): - st = self.posix.statvfs(".") - assert isinstance(st, self.posix.statvfs_result) - for field in [ - 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', - 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', - ]: - assert hasattr(st, field) + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) def test_pickle(self): import pickle, os From noreply at buildbot.pypy.org Thu Jul 25 16:36:26 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 25 Jul 2013 16:36:26 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Added tag release-2.1-beta2 for changeset 06f936c49498 Message-ID: <20130725143626.11E271C142B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65648:cfa007494f7f Date: 2013-07-25 16:35 +0200 http://bitbucket.org/pypy/pypy/changeset/cfa007494f7f/ Log: Added 
tag release-2.1-beta2 for changeset 06f936c49498 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -8,3 +8,4 @@ a0e2bc9ceccdd7e734d4c881a051320441ea5200 pypy-2.1-beta a0e2bc9ceccdd7e734d4c881a051320441ea5200 pypy-2.1-beta daf1b0412bfbd0666c19d567e37b29e4a3be5734 pypy-2.1-beta +06f936c494985d62764937336f65cb0131a4e3b6 release-2.1-beta2 From noreply at buildbot.pypy.org Thu Jul 25 17:03:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 25 Jul 2013 17:03:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add first part of a fastpath to stm_read_barrier (makes targettlc slower) Message-ID: <20130725150300.441511C0149@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65649:5f5f1a605cb3 Date: 2013-07-25 17:02 +0200 http://bitbucket.org/pypy/pypy/changeset/5f5f1a605cb3/ Log: add first part of a fastpath to stm_read_barrier (makes targettlc slower) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -35,6 +35,8 @@ from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id from rpython.jit.backend.x86 import stmtlocal +from rpython.rlib import rstm +from rpython.memory.gc.stmgc import StmGC class Assembler386(BaseAssembler): @@ -517,9 +519,9 @@ clt.allgcrefs = [] clt.frame_info.clear() # for now - if log: - operations = self._inject_debugging_code(looptoken, operations, - 'e', looptoken.number) + # if log: + # operations = self._inject_debugging_code(looptoken, operations, + # 'e', looptoken.number) regalloc = RegAlloc(self, self.cpu.translate_support_code) # @@ -578,9 +580,9 @@ self.setup(original_loop_token) descr_number = compute_unique_id(faildescr) - if log: - operations = self._inject_debugging_code(faildescr, operations, - 'b', descr_number) + # if log: + # operations = self._inject_debugging_code(faildescr, operations, + # 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) @@ -2180,27 +2182,49 @@ assert isinstance(result_loc, RegLoc) mc.POP_r(result_loc.value) + def _get_private_rev_num_addr(self): + assert self.cpu.gc_ll_descr.stm + rn = rstm.get_adr_of_private_rev_num() + rn = rn - stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(rn) + return rn def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): assert self.cpu.gc_ll_descr.stm - from rpython.jit.backend.llsupport.gc import STMBarrierDescr + from rpython.jit.backend.llsupport.gc import ( + STMBarrierDescr, STMReadBarrierDescr, STMWriteBarrierDescr) assert isinstance(descr, STMBarrierDescr) assert descr.returns_modified_object loc_base = arglocs[0] assert isinstance(loc_base, RegLoc) - # Write only a CALL to the helper prepared in advance, passing it as - # argument the address of the structure we are writing into - # (the first argument to COND_CALL_GC_WB). 
+ helper_num = 0 if is_frame: helper_num = 4 elif self._regalloc is not None and self._regalloc.xrm.reg_bindings: helper_num += 2 # + # FASTPATH: + # + rn = self._get_private_rev_num_addr() + if isinstance(descr, STMReadBarrierDescr): + # (obj->h_revision != stm_private_rev_num) + # && (FXCACHE_AT(obj) != obj))) + stmtlocal.tl_segment_prefix(mc) + #mc.CMP_jr(rn, loc_base.value) + mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) + mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) + mc.J_il8(rx86.Conditions['Z'], 0) # patched below + jz_location = mc.get_relative_pos() + else: + jz_location = 0 + # + # SLOWPATH_START + # if not is_frame: mc.PUSH(loc_base) - if is_frame and align_stack: + elif is_frame and align_stack: # ||retadr| mc.SUB_ri(esp.value, 16 - WORD) # erase the return address # ||retadr|...|| @@ -2214,10 +2238,15 @@ # result where argument was: mc.POP_r(loc_base.value) - if is_frame and align_stack: mc.ADD_ri(esp.value, 16 - WORD) # erase the return address - + # + # SLOWPATH_END + # + if isinstance(descr, STMReadBarrierDescr): + offset = mc.get_relative_pos() - jz_location + assert 0 < offset <= 127 + mc.overwrite(jz_location - 1, chr(offset)) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -3,6 +3,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry +def get_adr_of_private_rev_num(): + addr = llop.stm_get_adr_of_private_rev_num(llmemory.Address) + return rffi.cast(lltype.Signed, addr) def become_inevitable(): llop.stm_become_inevitable(lltype.Void) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -456,6 +456,8 @@ 'stm_abort_info_pop': LLOp(), 'stm_inspect_abort_info': LLOp(sideeffects=False), + 'stm_get_adr_of_private_rev_num':LLOp(), + # __________ address operations __________ 'boehm_malloc': LLOp(), diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -592,6 +592,7 @@ OP_STM_PUSH_ROOT = _OP_STM OP_STM_POP_ROOT_INTO = _OP_STM OP_STM_GET_ROOT_STACK_TOP = _OP_STM + OP_STM_GET_ADR_OF_PRIVATE_REV_NUM = _OP_STM OP_STM_ALLOCATE = _OP_STM OP_STM_WEAKREF_ALLOCATE = _OP_STM OP_STM_GET_TID = _OP_STM diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -99,6 +99,12 @@ return '%s = (%s)&stm_shadowstack;' % ( result, cdecl(funcgen.lltypename(op.result), '')) +def stm_get_adr_of_private_rev_num(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (%s)&stm_private_rev_num;' % ( + result, cdecl(funcgen.lltypename(op.result), '')) + + def stm_weakref_allocate(funcgen, op): arg0 = funcgen.expr(op.args[0]) arg1 = funcgen.expr(op.args[1]) From noreply at buildbot.pypy.org Thu Jul 25 17:05:59 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 17:05:59 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Fix/hack handling of delayed pointers in getgraph() Message-ID: <20130725150559.B35DA1C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65650:76a086e7dd24 Date: 2013-07-25 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/76a086e7dd24/ Log: Fix/hack handling of delayed pointers in getgraph() diff --git 
a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -1,3 +1,4 @@ +from rpython.rtyper.lltypesystem.lltype import DelayedPointer from rpython.translator.simplify import get_graph from rpython.tool.algo.unionfind import UnionFind @@ -52,7 +53,10 @@ return self.bottom_result() def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj + try: + funcobj = op.args[0].value._obj + except DelayedPointer: + return self.bottom_result() result = self.bottom_result() if hasattr(funcobj, '_callbacks'): bk = self.translator.annotator.bookkeeper diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -20,7 +20,10 @@ f = arg.value if not isinstance(f, lltype._ptr): return None - funcobj = f._obj + try: + funcobj = f._getobj() + except lltype.DelayedPointer: + return None try: callable = funcobj._callable except (AttributeError, KeyError, AssertionError): From noreply at buildbot.pypy.org Thu Jul 25 17:24:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 17:24:24 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: In-progress: refactor gcpage.visit() and related code Message-ID: <20130725152424.5D0351C101E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r435:d86ab3aa636d Date: 2013-07-25 17:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/d86ab3aa636d/ Log: In-progress: refactor gcpage.visit() and related code diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -67,7 +67,7 @@ static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -212,35 +212,160 @@ static struct GcPtrList objects_to_trace; -static void keep_original_alive(gcptr obj) +static gcptr copy_over_original(gcptr obj, gcptr id_copy) { - /* keep alive the original of a visited object */ - gcptr id_copy = (gcptr)obj->h_original; - /* prebuilt original objects may have a predifined - hash in h_original */ - if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - assert(id_copy->h_tid & GCFLAG_PUBLIC); - if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - /* see fix_outdated() */ - if (!(id_copy->h_tid & GCFLAG_VISITED)) { - id_copy->h_tid |= GCFLAG_VISITED; - assert(!(id_copy->h_tid & GCFLAG_MOVED)); + assert(obj != id_copy); + assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ - /* XXX: may not always need tracing? 
*/ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); + /* check a few flags */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + + assert(id_copy->h_tid & GCFLAG_PUBLIC); + assert(!(id_copy->h_tid & GCFLAG_BACKUP_COPY)); + + /* id_copy may be a stub, but in this case, as the original, it + should have been allocated with a big enough chunk of memory. + Also, obj itself might be a stub. */ + assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); + if (!(id_copy->h_tid & GCFLAG_STUB) && !(obj->h_tid & GCFLAG_STUB)) { + assert(stmgc_size(id_copy) == stmgc_size(obj)); + } + + /* add the MOVED flag to 'obj' */ + obj->h_tid |= GCFLAG_MOVED; + + /* copy the object's content */ + dprintf(("copy %p over %p\n", obj, id_copy)); + memcpy(id_copy + 1, obj + 1, stmgc_size(obj) - sizeof(struct stm_object_s)); + + /* copy the object's h_revision number */ + id_copy->h_revision = obj->h_revision; + + /* copy the STUB flag */ + id_copy->h_tid &= ~GCFLAG_STUB; + id_copy->h_tid |= (obj->h_tid & GCFLAG_STUB); + + return id_copy; +} + +static void visit_nonpublic(gcptr obj) +{ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_HAS_ID)); + assert(!(obj->h_tid & GCFLAG_SMALLSTUB)); + assert(!(obj->h_tid & GCFLAG_MOVED)); + + if (obj->h_tid & GCFLAG_VISITED) + return; /* already visited */ + + obj->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, obj); +} + +static gcptr visit_public(gcptr obj) +{ + /* The goal is to walk to the most recent copy, then copy its + content back into the h_original, and finally returns this + h_original. + */ + gcptr original; + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) + original = (gcptr)obj->h_original; + else + original = obj; + + /* the original object must also be a public object, and cannot + be a small stub. */ + assert(original->h_tid & GCFLAG_PUBLIC); + assert(!(original->h_tid & GCFLAG_SMALLSTUB)); + + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!(original->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(original->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + + /* if 'original' was already visited, we are done */ + if (original->h_tid & GCFLAG_VISITED) + return original; + + /* walk to the head of the chained list */ + while (IS_POINTER(obj->h_revision)) { + if (!(obj->h_revision & 2)) { + obj = (gcptr)obj->h_revision; + assert(obj->h_tid & GCFLAG_PUBLIC); + continue; + } + + /* it's a stub: check the current stealing status */ + assert(obj->h_tid & GCFLAG_STUB); + gcptr obj2 = (gcptr)(obj->h_revision - 2); + + if (obj2->h_tid & GCFLAG_PUBLIC) { + /* the stub target itself was stolen, so is public now. + Continue looping from there. */ + obj = obj2; + continue; + } + + if (obj2->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* the stub target is a private_from_protected. */ + gcptr obj3 = (gcptr)obj2->h_revision; + if (obj3->h_tid & GCFLAG_PUBLIC) { + assert(!(obj3->h_tid & GCFLAG_BACKUP_COPY)); + /* the backup copy was stolen and is now a regular + public object. */ + obj = obj3; + continue; } - } + else { + /* the backup copy was not stolen. Ignore this pair + obj2/obj3, and the head of the public chain is obj. + The pair obj2/obj3 was or will be handled by + mark_all_stack_roots(). 
*/ + assert(obj3->h_tid & GCFLAG_BACKUP_COPY); + break; + } + } else { - /* prebuilt originals won't get collected anyway - and if they are not reachable in any other way, - we only ever need their location, not their content */ + /* the stub target is just a protected object. + The head of the public chain is obj. */ + assert(!IS_POINTER(obj2->h_revision)); + break; } } + + /* copy obj over original */ + if (obj != original) + copy_over_original(obj, original); + + /* return this original */ + original->h_tid |= GCFLAG_VISITED; + gcptrlist_insert(&objects_to_trace, original); + return original; } -static void visit(gcptr *pobj); +static void visit(gcptr *pobj) +{ + /* Visits '*pobj', marking it as surviving and possibly adding it to + objects_to_trace. Fixes *pobj to point to the exact copy that + survived. + */ + gcptr obj = *pobj; + if (obj == NULL); + return; + + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + /* 'obj' is a private or protected copy. */ + visit_nonpublic(obj); + } + else { + *pobj = visit_public(obj); + } +} gcptr stmgcpage_visit(gcptr obj) { @@ -248,203 +373,6 @@ return obj; } -static gcptr copy_over_original(gcptr obj) -{ - assert(!(obj->h_tid & GCFLAG_VISITED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - - if (obj->h_tid & GCFLAG_PUBLIC /* XXX: required? */ - && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) - && obj->h_original) { - - gcptr id_copy = (gcptr)obj->h_original; - assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ - if (!(id_copy->h_tid & GCFLAG_PUBLIC)) - assert(0); - /* return NULL; */ /* could be priv_from_protected with - where backup is stolen and its h-original - points to it. */ - - /* id_copy may be a stub, but in this case, as the original, it - should have been allocated with a big enough chunk of memory */ - assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); - assert((id_copy->h_tid & GCFLAG_STUB) || - stmgc_size(id_copy) == stmgc_size(obj)); - /* prehash may be specific hash value for prebuilts, or 0 */ - revision_t prehash = id_copy->h_original; - assert(IMPLIES(prehash, id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - /* old_tid may have prebuilt_original flags that should not be lost */ - revision_t old_tid = id_copy->h_tid; - - memcpy(id_copy, obj, stmgc_size(obj)); - assert(!((id_copy->h_tid ^ old_tid) - & (GCFLAG_BACKUP_COPY //| GCFLAG_STUB, id_copy may be stub - | GCFLAG_PUBLIC | GCFLAG_HAS_ID | GCFLAG_SMALLSTUB - | GCFLAG_PRIVATE_FROM_PROTECTED))); - id_copy->h_original = prehash; - id_copy->h_tid = old_tid & ~(GCFLAG_VISITED |/* will be visited next */ - GCFLAG_STUB); /* no longer a stub */ - - dprintf(("copy %p over %p\n", obj, id_copy)); - - /* for those visiting later: */ - obj->h_revision = (revision_t)id_copy; - - /* mark as MOVED for transactions to fix their - public_to_private. 
Otherwise, inevitable transactions - would think their public obj was modified (also for - other transactions, but they can abort) */ - obj->h_tid |= GCFLAG_MOVED; - - return id_copy; - } - - return NULL; -} - -static void visit(gcptr *pobj) -{ - gcptr obj = *pobj; - if (obj == NULL) - return; - - restart: - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - - gcptr next = copy_over_original(obj); - if (next) { - revision_t loc = (revision_t)pobj - offsetof(struct stm_object_s, - h_revision); - if ((gcptr)loc != next) - /* we don't want to set h_revision of 'next' to - 'next' itself, it was already set by - copy_over_original to a global head revision */ - *pobj = next; - obj = next; - - assert(obj->h_revision & 1); - assert(!(obj->h_tid & GCFLAG_VISITED)); - goto restart; - } - - obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_MOVED)); - - gcptrlist_insert(&objects_to_trace, obj); - - keep_original_alive(obj); - } - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - gcptr prev_obj = obj; - - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub: keep it if it points to a protected version, - because we need to keep the effect of stealing if it is - later accessed by the wrong thread. If it points to a - public object (possibly outdated), we can ignore the stub. - */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC)) { - prev_obj->h_tid |= GCFLAG_VISITED; - assert(!(prev_obj->h_tid & GCFLAG_MOVED)); - - keep_original_alive(prev_obj); - - assert(*pobj == prev_obj); - /* recursion, but should be only once */ - obj = stmgcpage_visit(obj); - assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj) | 2; - return; - } - } - - if (!(obj->h_revision & 3)) { - /* obj is neither a stub nor a most recent revision: - completely ignore obj->h_revision */ - - obj = (gcptr)obj->h_revision; - assert(obj->h_tid & GCFLAG_PUBLIC); - prev_obj->h_revision = (revision_t)obj; - } - *pobj = obj; - goto restart; - } - else if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ - } - else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - - if (obj->h_original && (gcptr)obj->h_original != B) { - /* if B is original, it will be visited anyway */ - assert(obj->h_original == B->h_original); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - keep_original_alive(obj); - } - - obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_MOVED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - - if (!(B->h_tid & GCFLAG_MOVED)) { - B->h_tid |= GCFLAG_VISITED; - assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert2(&objects_to_trace, obj, B); - } - else { - /* B was copied over its h_original */ - pobj = (gcptr *)&obj->h_revision; - obj = *pobj; - goto restart; - } - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - pobj = (gcptr 
*)&B->h_revision; - obj = *pobj; - goto restart; - } - } -} - - -static void visit_keep(gcptr obj) -{ - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_MOVED)); - gcptrlist_insert(&objects_to_trace, obj); - - if (IS_POINTER(obj->h_revision)) { - assert(!(obj->h_revision & 2)); - visit((gcptr *)&obj->h_revision); - } - keep_original_alive(obj); - } -} - static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { @@ -458,20 +386,18 @@ /* Note about prebuilt roots: 'stm_prebuilt_gcroots' is a list that contains all the ones that have been modified. Because they are themselves not in any page managed by this file, their - GCFLAG_VISITED will not be removed at the end of the current - collection. This is fine because the base object cannot contain - references to the heap. So we decided to systematically set - GCFLAG_VISITED on prebuilt objects. */ + GCFLAG_VISITED is not removed at the end of the current + collection. That's why we remove it here. */ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; - gcptr obj; + gcptr obj, obj2; for (; pobj != pend; pobj++) { obj = *pobj; obj->h_tid &= ~GCFLAG_VISITED; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - /* assert(IS_POINTER(obj->h_revision)); */ - visit_keep(obj); + obj2 = visit_public(obj); + assert(obj2 == obj); /* it is its own original */ } } @@ -498,8 +424,8 @@ static void mark_all_stack_roots(void) { struct tx_descriptor *d; - struct G2L new_public_to_private; - memset(&new_public_to_private, 0, sizeof(struct G2L)); + struct GcPtrList new_public_to_private; + memset(&new_public_to_private, 0, sizeof(new_public_to_private)); for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); @@ -513,64 +439,49 @@ /* the current transaction's private copies of public objects */ wlog_t *item; - - /* transactions need to have their pub_to_priv fixed. Otherwise, - they'll think their objects got outdated. Only absolutely - necessary for inevitable transactions (XXX check performance?). */ - dprintf(("start fixup (%p):\n", d)); - G2L_LOOP_FORWARD(d->public_to_private, item) { - gcptr R = item->addr; - gcptr L = item->val; - if (R->h_tid & GCFLAG_MOVED) { - /* R was copied over its original */ - gcptr new_R = (gcptr)R->h_original; - /* gcptrlist_insert(&objects_to_trace, new_R); */ - - g2l_insert(&new_public_to_private, new_R, L); - G2L_LOOP_DELETE(item); - - if (L && L->h_revision == (revision_t)R) { - L->h_revision = (revision_t)new_R; - dprintf((" fixup %p to %p <-> %p\n", R, new_R, L)); - } - else { - dprintf((" fixup %p to %p -> %p\n", R, new_R, L)); - } - } - } G2L_LOOP_END; - - /* reinsert to real pub_to_priv */ - G2L_LOOP_FORWARD(new_public_to_private, item) { - g2l_insert(&d->public_to_private, item->addr, item->val); - } G2L_LOOP_END; - g2l_clear(&new_public_to_private); - - /* now visit them */ G2L_LOOP_FORWARD(d->public_to_private, item) { /* note that 'item->addr' is also in the read set, so if it was outdated, it will be found at that time */ gcptr R = item->addr; gcptr L = item->val; - visit_keep(R); + /* we visit the public object R */ + gcptr new_R = visit_public(R); + + if (new_R != R) { + /* we have to update the key in public_to_private, which + can only be done by deleting the existing key and + (after the loop) re-inserting the new key. 
*/ + G2L_LOOP_DELETE(item); + gcptrlist_insert2(&new_public_to_private, new_R, L); + } + + /* we visit the private copy L --- which at this point + should be private, possibly private_from_protected, + so visit() should return the same private copy */ if (L != NULL) { - /* minor collection found R->L in public_to_young - and R was modified. It then sets item->val to NULL and wants - to abort later. */ - revision_t v = L->h_revision; - visit_keep(L); - /* a bit of custom logic here: if L->h_revision used to - point exactly to R, as set by stealing, then we must - keep this property, even though visit_keep(L) might - decide it would be better to make it point to a more - recent copy. */ - if (v == (revision_t)R) { - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - L->h_revision = v; /* restore */ - } + visit_nonpublic(L); } + } G2L_LOOP_END; + /* reinsert to real pub_to_priv */ + long i, size = new_public_to_private.size; + gcptr *items = new_public_to_private.items; + for (i = 0; i < size; i += 2) { + g2l_insert(&d->public_to_private, items[i], items[i + 1]); + } + gcptrlist_clear(&new_public_to_private); + + /* the current transaction's private copies of protected objects */ + items = d->private_from_protected.items; + for (i = d->private_from_protected.size - 1; i >= 0; i--) { + gcptr obj = items[i]; + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + visit_nonpublic(obj); + visit((gcptr *)&obj->h_revision); + } + /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); @@ -587,37 +498,13 @@ d->num_private_from_protected_known_old); } - if (new_public_to_private.raw_start) - g2l_delete_not_used_any_more(&new_public_to_private); + gcptrlist_delete(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) { long i; gcptr *items; - - /* It can occur that 'private_from_protected' contains an object that - * has not been visited at all (maybe only in inevitable - * transactions). - */ - items = d->private_from_protected.items; - for (i = d->private_from_protected.size - 1; i >= 0; i--) { - gcptr obj = items[i]; - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - /* we don't copy private / protected objects over prebuilts (yet) */ - assert(!(obj->h_tid & GCFLAG_MOVED)); - - if (!(obj->h_tid & GCFLAG_VISITED)) { - /* forget 'obj' */ - dprintf(("private_from_protected: %p UNLISTED\n", obj)); - items[i] = items[--d->private_from_protected.size]; - } - else { - dprintf(("private_from_protected: %p\n", obj)); - assert(((gcptr)obj->h_revision)->h_tid & GCFLAG_VISITED); - } - } - assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -130,7 +130,7 @@ #define GCFLAG_BACKUP_COPY ... #define GCFLAG_PUBLIC_TO_PRIVATE ... #define GCFLAG_WRITE_BARRIER ... - #define GCFLAG_MOVED ... + #define GCFLAG_MOVED ... #define GCFLAG_STUB ... #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define GCFLAG_HAS_ID ... 
From noreply at buildbot.pypy.org Thu Jul 25 17:29:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 17:29:24 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Some fixes Message-ID: <20130725152924.BE2201C13FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r436:e007ec3d53f3 Date: 2013-07-25 17:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/e007ec3d53f3/ Log: Some fixes diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -332,8 +332,10 @@ } else { /* the stub target is just a protected object. - The head of the public chain is obj. */ + The head of the public chain is obj. We have to + explicitly keep obj2 alive. */ assert(!IS_POINTER(obj2->h_revision)); + visit_nonpublic(obj2); break; } } @@ -355,7 +357,7 @@ survived. */ gcptr obj = *pobj; - if (obj == NULL); + if (obj == NULL) return; if (!(obj->h_tid & GCFLAG_PUBLIC)) { From noreply at buildbot.pypy.org Thu Jul 25 17:31:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 17:31:06 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix the test Message-ID: <20130725153106.9BCEA1C13FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r437:3afacc15e34b Date: 2013-07-25 17:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/3afacc15e34b/ Log: Fix the test diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -193,12 +193,12 @@ major_collect() major_collect() p1b = lib.stm_pop_root() - assert p1b == p2 - check_free_old(p1) - check_not_free(p2) - p3 = lib.stm_write_barrier(p2) - assert p3 != p2 - assert p3 == lib.stm_write_barrier(p2) + assert p1b == p1 + check_not_free(p1) + check_free_old(p2) + p3 = lib.stm_write_barrier(p1) + assert p3 != p1 + assert p3 == lib.stm_write_barrier(p1) def test_new_version_id_alive(): p1 = oalloc(HDR); make_public(p1) From noreply at buildbot.pypy.org Thu Jul 25 17:35:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 17:35:18 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: More fixes in the tests Message-ID: <20130725153518.A63171C142B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r438:f6b4d8cafc3e Date: 2013-07-25 17:35 +0200 http://bitbucket.org/pypy/stmgc/changeset/f6b4d8cafc3e/ Log: More fixes in the tests diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -589,6 +589,12 @@ p1.h_tid |= GCFLAG_PUBLIC_TO_PRIVATE if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: lib.stm_add_prebuilt_root(p1) + assert p2.h_original == 0 + assert p1 != p2 + if (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL): + p2.h_original = ffi.cast("revision_t", p1) + else: + p2.h_original = p1.h_original def delegate_original(p1, p2): # no h_original or it is a prebuilt with a specified hash in h_original diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -204,7 +204,6 @@ p1 = oalloc(HDR); make_public(p1) p2 = oalloc(HDR); make_public(p2) delegate(p1, p2) - delegate_original(p1, p2) lib.stm_push_root(p1) major_collect() major_collect() @@ -226,14 +225,14 @@ major_collect() major_collect() p2b = lib.stm_pop_root() - assert p2b == p4 - check_free_old(p1) + assert p2b == p1 + check_not_free(p1) check_free_old(p2) check_free_old(p3) - check_not_free(p4) - p5 = lib.stm_write_barrier(p4) - assert p5 
!= p4 - assert p5 == lib.stm_write_barrier(p4) + check_free_old(p4) + p5 = lib.stm_write_barrier(p1) + assert p5 != p1 + assert p5 == lib.stm_write_barrier(p1) assert p5 == lib.stm_write_barrier(p5) def test_new_version_kill_intermediate_non_root(): From noreply at buildbot.pypy.org Thu Jul 25 17:39:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 17:39:52 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: More test fixes Message-ID: <20130725153952.DEF1B1C0149@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r439:c00f65b25cce Date: 2013-07-25 17:39 +0200 http://bitbucket.org/pypy/stmgc/changeset/c00f65b25cce/ Log: More test fixes diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -589,22 +589,16 @@ p1.h_tid |= GCFLAG_PUBLIC_TO_PRIVATE if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: lib.stm_add_prebuilt_root(p1) + # no h_original or it is a prebuilt with a specified hash in h_original assert p2.h_original == 0 assert p1 != p2 + assert p1.h_tid & GCFLAG_OLD + assert p2.h_tid & GCFLAG_OLD if (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL): p2.h_original = ffi.cast("revision_t", p1) else: p2.h_original = p1.h_original -def delegate_original(p1, p2): - # no h_original or it is a prebuilt with a specified hash in h_original - assert (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL) - assert p1.h_tid & GCFLAG_OLD - assert p2.h_original == 0 - assert p1 != p2 - p2.h_original = ffi.cast("revision_t", p1) - - def make_public(p1): """Hack at an object returned by oalloc() to force it public.""" assert classify(p1) == "protected" diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -250,41 +250,43 @@ major_collect() lib.stm_pop_root() check_not_free(p1) - check_free_old(p2) + check_not_free(p2) check_free_old(p3) check_free_old(p4) - check_not_free(p5) + check_free_old(p5) print 'p1:', p1 print ' containing:', rawgetptr(p1, 0) print 'p2:', p2 print 'p3:', p3 print 'p4:', p4 print 'p5:', p5 - assert rawgetptr(p1, 0) == p5 + assert rawgetptr(p1, 0) == p2 def test_new_version_not_kill_intermediate_original(): p1 = oalloc_refs(1); make_public(p1) - p2 = oalloc(HDR); make_public(p2) - p3 = oalloc(HDR); make_public(p3) - p4 = oalloc(HDR); make_public(p4) - p5 = oalloc(HDR); make_public(p5) + p2 = oalloc(HDR + WORD); make_public(p2) + p3 = oalloc(HDR + WORD); make_public(p3) + p4 = oalloc(HDR + WORD); make_public(p4) + p5 = oalloc(HDR + WORD); make_public(p5) delegate(p2, p3) delegate(p3, p4) delegate(p4, p5) rawsetptr(p1, 0, p3) - delegate_original(p3, p2) - delegate_original(p3, p4) - delegate_original(p3, p5) + lib.rawsetlong(p2, 0, 222) + lib.rawsetlong(p3, 0, 333) + lib.rawsetlong(p4, 0, 444) + lib.rawsetlong(p5, 0, 555) lib.stm_push_root(p1) major_collect() lib.stm_pop_root() check_not_free(p1) - check_free_old(p2) - check_not_free(p3) # original + check_not_free(p2) + check_free_old(p3) check_free_old(p4) check_free_old(p5) - assert rawgetptr(p1, 0) == p3 + assert rawgetptr(p1, 0) == p2 + assert lib.rawgetlong(p2, 0) == 555 # copied over from p5 def test_prebuilt_version_1(): p1 = lib.pseudoprebuilt(HDR, 42 + HDR) From noreply at buildbot.pypy.org Thu Jul 25 19:00:43 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 19:00:43 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Back out changeset a2881bd0264c. 
Message-ID: <20130725170043.D974A1C13FC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65651:a1d09ff7418a Date: 2013-07-25 18:55 +0200 http://bitbucket.org/pypy/pypy/changeset/a1d09ff7418a/ Log: Back out changeset a2881bd0264c. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature from pypy.objspace.std import bytesobject from pypy.objspace.std.intobject import W_IntObject @@ -427,58 +427,58 @@ __repr__ = interp2app(W_BytearrayObject.descr_repr), __str__ = interp2app(W_BytearrayObject.descr_str), - __eq__ = interpindirect2app(W_BytearrayObject.descr_eq), - __ne__ = interpindirect2app(W_BytearrayObject.descr_ne), - __lt__ = interpindirect2app(W_BytearrayObject.descr_lt), - __le__ = interpindirect2app(W_BytearrayObject.descr_le), - __gt__ = interpindirect2app(W_BytearrayObject.descr_gt), - __ge__ = interpindirect2app(W_BytearrayObject.descr_ge), + __eq__ = interp2app(W_BytearrayObject.descr_eq), + __ne__ = interp2app(W_BytearrayObject.descr_ne), + __lt__ = interp2app(W_BytearrayObject.descr_lt), + __le__ = interp2app(W_BytearrayObject.descr_le), + __gt__ = interp2app(W_BytearrayObject.descr_gt), + __ge__ = interp2app(W_BytearrayObject.descr_ge), - __len__ = interpindirect2app(W_BytearrayObject.descr_len), - __contains__ = interpindirect2app(W_BytearrayObject.descr_contains), + __len__ = interp2app(W_BytearrayObject.descr_len), + __contains__ = interp2app(W_BytearrayObject.descr_contains), - __add__ = interpindirect2app(W_BytearrayObject.descr_add), - __mul__ = interpindirect2app(W_BytearrayObject.descr_mul), - __rmul__ = interpindirect2app(W_BytearrayObject.descr_mul), + __add__ = interp2app(W_BytearrayObject.descr_add), + __mul__ = interp2app(W_BytearrayObject.descr_mul), + __rmul__ = interp2app(W_BytearrayObject.descr_mul), - __getitem__ = interpindirect2app(W_BytearrayObject.descr_getitem), + __getitem__ = interp2app(W_BytearrayObject.descr_getitem), - capitalize = interpindirect2app(W_BytearrayObject.descr_capitalize), - center = interpindirect2app(W_BytearrayObject.descr_center), - count = interpindirect2app(W_BytearrayObject.descr_count), - decode = interpindirect2app(W_BytearrayObject.descr_decode), - expandtabs = interpindirect2app(W_BytearrayObject.descr_expandtabs), - find = interpindirect2app(W_BytearrayObject.descr_find), - rfind = interpindirect2app(W_BytearrayObject.descr_rfind), - index = interpindirect2app(W_BytearrayObject.descr_index), - rindex = interpindirect2app(W_BytearrayObject.descr_rindex), - isalnum = interpindirect2app(W_BytearrayObject.descr_isalnum), - isalpha = interpindirect2app(W_BytearrayObject.descr_isalpha), - isdigit = interpindirect2app(W_BytearrayObject.descr_isdigit), - islower = interpindirect2app(W_BytearrayObject.descr_islower), - isspace = interpindirect2app(W_BytearrayObject.descr_isspace), - istitle = interpindirect2app(W_BytearrayObject.descr_istitle), - isupper = interpindirect2app(W_BytearrayObject.descr_isupper), - join = interpindirect2app(W_BytearrayObject.descr_join), - ljust 
= interpindirect2app(W_BytearrayObject.descr_ljust), - rjust = interpindirect2app(W_BytearrayObject.descr_rjust), - lower = interpindirect2app(W_BytearrayObject.descr_lower), - partition = interpindirect2app(W_BytearrayObject.descr_partition), - rpartition = interpindirect2app(W_BytearrayObject.descr_rpartition), - replace = interpindirect2app(W_BytearrayObject.descr_replace), - split = interpindirect2app(W_BytearrayObject.descr_split), - rsplit = interpindirect2app(W_BytearrayObject.descr_rsplit), - splitlines = interpindirect2app(W_BytearrayObject.descr_splitlines), - startswith = interpindirect2app(W_BytearrayObject.descr_startswith), - endswith = interpindirect2app(W_BytearrayObject.descr_endswith), - strip = interpindirect2app(W_BytearrayObject.descr_strip), - lstrip = interpindirect2app(W_BytearrayObject.descr_lstrip), - rstrip = interpindirect2app(W_BytearrayObject.descr_rstrip), - swapcase = interpindirect2app(W_BytearrayObject.descr_swapcase), - title = interpindirect2app(W_BytearrayObject.descr_title), - translate = interpindirect2app(W_BytearrayObject.descr_translate), - upper = interpindirect2app(W_BytearrayObject.descr_upper), - zfill = interpindirect2app(W_BytearrayObject.descr_zfill), + capitalize = interp2app(W_BytearrayObject.descr_capitalize), + center = interp2app(W_BytearrayObject.descr_center), + count = interp2app(W_BytearrayObject.descr_count), + decode = interp2app(W_BytearrayObject.descr_decode), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs), + find = interp2app(W_BytearrayObject.descr_find), + rfind = interp2app(W_BytearrayObject.descr_rfind), + index = interp2app(W_BytearrayObject.descr_index), + rindex = interp2app(W_BytearrayObject.descr_rindex), + isalnum = interp2app(W_BytearrayObject.descr_isalnum), + isalpha = interp2app(W_BytearrayObject.descr_isalpha), + isdigit = interp2app(W_BytearrayObject.descr_isdigit), + islower = interp2app(W_BytearrayObject.descr_islower), + isspace = interp2app(W_BytearrayObject.descr_isspace), + istitle = interp2app(W_BytearrayObject.descr_istitle), + isupper = interp2app(W_BytearrayObject.descr_isupper), + join = interp2app(W_BytearrayObject.descr_join), + ljust = interp2app(W_BytearrayObject.descr_ljust), + rjust = interp2app(W_BytearrayObject.descr_rjust), + lower = interp2app(W_BytearrayObject.descr_lower), + partition = interp2app(W_BytearrayObject.descr_partition), + rpartition = interp2app(W_BytearrayObject.descr_rpartition), + replace = interp2app(W_BytearrayObject.descr_replace), + split = interp2app(W_BytearrayObject.descr_split), + rsplit = interp2app(W_BytearrayObject.descr_rsplit), + splitlines = interp2app(W_BytearrayObject.descr_splitlines), + startswith = interp2app(W_BytearrayObject.descr_startswith), + endswith = interp2app(W_BytearrayObject.descr_endswith), + strip = interp2app(W_BytearrayObject.descr_strip), + lstrip = interp2app(W_BytearrayObject.descr_lstrip), + rstrip = interp2app(W_BytearrayObject.descr_rstrip), + swapcase = interp2app(W_BytearrayObject.descr_swapcase), + title = interp2app(W_BytearrayObject.descr_title), + translate = interp2app(W_BytearrayObject.descr_translate), + upper = interp2app(W_BytearrayObject.descr_upper), + zfill = interp2app(W_BytearrayObject.descr_zfill), __init__ = interp2app(W_BytearrayObject.descr_init), __buffer__ = interp2app(W_BytearrayObject.descr_buffer), diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -2,7 +2,7 @@ from 
pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import operationerrfmt -from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format @@ -262,60 +262,60 @@ __str__ = interp2app(W_BytesObject.descr_str), __hash__ = interp2app(W_BytesObject.descr_hash), - __eq__ = interpindirect2app(W_BytesObject.descr_eq), - __ne__ = interpindirect2app(W_BytesObject.descr_ne), - __lt__ = interpindirect2app(W_BytesObject.descr_lt), - __le__ = interpindirect2app(W_BytesObject.descr_le), - __gt__ = interpindirect2app(W_BytesObject.descr_gt), - __ge__ = interpindirect2app(W_BytesObject.descr_ge), + __eq__ = interp2app(W_BytesObject.descr_eq), + __ne__ = interp2app(W_BytesObject.descr_ne), + __lt__ = interp2app(W_BytesObject.descr_lt), + __le__ = interp2app(W_BytesObject.descr_le), + __gt__ = interp2app(W_BytesObject.descr_gt), + __ge__ = interp2app(W_BytesObject.descr_ge), - __len__ = interpindirect2app(W_BytesObject.descr_len), - __contains__ = interpindirect2app(W_BytesObject.descr_contains), + __len__ = interp2app(W_BytesObject.descr_len), + __contains__ = interp2app(W_BytesObject.descr_contains), - __add__ = interpindirect2app(W_BytesObject.descr_add), - __mul__ = interpindirect2app(W_BytesObject.descr_mul), - __rmul__ = interpindirect2app(W_BytesObject.descr_mul), + __add__ = interp2app(W_BytesObject.descr_add), + __mul__ = interp2app(W_BytesObject.descr_mul), + __rmul__ = interp2app(W_BytesObject.descr_mul), - __getitem__ = interpindirect2app(W_BytesObject.descr_getitem), - __getslice__ = interpindirect2app(W_BytesObject.descr_getslice), + __getitem__ = interp2app(W_BytesObject.descr_getitem), + __getslice__ = interp2app(W_BytesObject.descr_getslice), - capitalize = interpindirect2app(W_BytesObject.descr_capitalize), - center = interpindirect2app(W_BytesObject.descr_center), - count = interpindirect2app(W_BytesObject.descr_count), - decode = interpindirect2app(W_BytesObject.descr_decode), - encode = interpindirect2app(W_BytesObject.descr_encode), - expandtabs = interpindirect2app(W_BytesObject.descr_expandtabs), - find = interpindirect2app(W_BytesObject.descr_find), - rfind = interpindirect2app(W_BytesObject.descr_rfind), - index = interpindirect2app(W_BytesObject.descr_index), - rindex = interpindirect2app(W_BytesObject.descr_rindex), - isalnum = interpindirect2app(W_BytesObject.descr_isalnum), - isalpha = interpindirect2app(W_BytesObject.descr_isalpha), - isdigit = interpindirect2app(W_BytesObject.descr_isdigit), - islower = interpindirect2app(W_BytesObject.descr_islower), - isspace = interpindirect2app(W_BytesObject.descr_isspace), - istitle = interpindirect2app(W_BytesObject.descr_istitle), - isupper = interpindirect2app(W_BytesObject.descr_isupper), - join = interpindirect2app(W_BytesObject.descr_join), - ljust = interpindirect2app(W_BytesObject.descr_ljust), - rjust = interpindirect2app(W_BytesObject.descr_rjust), - lower = interpindirect2app(W_BytesObject.descr_lower), - partition = interpindirect2app(W_BytesObject.descr_partition), - rpartition = interpindirect2app(W_BytesObject.descr_rpartition), - replace = interpindirect2app(W_BytesObject.descr_replace), - split = interpindirect2app(W_BytesObject.descr_split), - rsplit = interpindirect2app(W_BytesObject.descr_rsplit), - splitlines = 
interpindirect2app(W_BytesObject.descr_splitlines), - startswith = interpindirect2app(W_BytesObject.descr_startswith), - endswith = interpindirect2app(W_BytesObject.descr_endswith), - strip = interpindirect2app(W_BytesObject.descr_strip), - lstrip = interpindirect2app(W_BytesObject.descr_lstrip), - rstrip = interpindirect2app(W_BytesObject.descr_rstrip), - swapcase = interpindirect2app(W_BytesObject.descr_swapcase), - title = interpindirect2app(W_BytesObject.descr_title), - translate = interpindirect2app(W_BytesObject.descr_translate), - upper = interpindirect2app(W_BytesObject.descr_upper), - zfill = interpindirect2app(W_BytesObject.descr_zfill), + capitalize = interp2app(W_BytesObject.descr_capitalize), + center = interp2app(W_BytesObject.descr_center), + count = interp2app(W_BytesObject.descr_count), + decode = interp2app(W_BytesObject.descr_decode), + encode = interp2app(W_BytesObject.descr_encode), + expandtabs = interp2app(W_BytesObject.descr_expandtabs), + find = interp2app(W_BytesObject.descr_find), + rfind = interp2app(W_BytesObject.descr_rfind), + index = interp2app(W_BytesObject.descr_index), + rindex = interp2app(W_BytesObject.descr_rindex), + isalnum = interp2app(W_BytesObject.descr_isalnum), + isalpha = interp2app(W_BytesObject.descr_isalpha), + isdigit = interp2app(W_BytesObject.descr_isdigit), + islower = interp2app(W_BytesObject.descr_islower), + isspace = interp2app(W_BytesObject.descr_isspace), + istitle = interp2app(W_BytesObject.descr_istitle), + isupper = interp2app(W_BytesObject.descr_isupper), + join = interp2app(W_BytesObject.descr_join), + ljust = interp2app(W_BytesObject.descr_ljust), + rjust = interp2app(W_BytesObject.descr_rjust), + lower = interp2app(W_BytesObject.descr_lower), + partition = interp2app(W_BytesObject.descr_partition), + rpartition = interp2app(W_BytesObject.descr_rpartition), + replace = interp2app(W_BytesObject.descr_replace), + split = interp2app(W_BytesObject.descr_split), + rsplit = interp2app(W_BytesObject.descr_rsplit), + splitlines = interp2app(W_BytesObject.descr_splitlines), + startswith = interp2app(W_BytesObject.descr_startswith), + endswith = interp2app(W_BytesObject.descr_endswith), + strip = interp2app(W_BytesObject.descr_strip), + lstrip = interp2app(W_BytesObject.descr_lstrip), + rstrip = interp2app(W_BytesObject.descr_rstrip), + swapcase = interp2app(W_BytesObject.descr_swapcase), + title = interp2app(W_BytesObject.descr_title), + translate = interp2app(W_BytesObject.descr_translate), + upper = interp2app(W_BytesObject.descr_upper), + zfill = interp2app(W_BytesObject.descr_zfill), format = interp2app(W_BytesObject.descr_format), __format__ = interp2app(W_BytesObject.descr__format__), diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -2,7 +2,7 @@ from pypy.interpreter import unicodehelper from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, interpindirect2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef @@ -403,60 +403,60 @@ __str__ = interp2app(W_UnicodeObject.descr_str), __hash__ = interp2app(W_UnicodeObject.descr_hash), - __eq__ = interpindirect2app(W_UnicodeObject.descr_eq), - __ne__ = interpindirect2app(W_UnicodeObject.descr_ne), - 
__lt__ = interpindirect2app(W_UnicodeObject.descr_lt), - __le__ = interpindirect2app(W_UnicodeObject.descr_le), - __gt__ = interpindirect2app(W_UnicodeObject.descr_gt), - __ge__ = interpindirect2app(W_UnicodeObject.descr_ge), + __eq__ = interp2app(W_UnicodeObject.descr_eq), + __ne__ = interp2app(W_UnicodeObject.descr_ne), + __lt__ = interp2app(W_UnicodeObject.descr_lt), + __le__ = interp2app(W_UnicodeObject.descr_le), + __gt__ = interp2app(W_UnicodeObject.descr_gt), + __ge__ = interp2app(W_UnicodeObject.descr_ge), - __len__ = interpindirect2app(W_UnicodeObject.descr_len), - __contains__ = interpindirect2app(W_UnicodeObject.descr_contains), + __len__ = interp2app(W_UnicodeObject.descr_len), + __contains__ = interp2app(W_UnicodeObject.descr_contains), - __add__ = interpindirect2app(W_UnicodeObject.descr_add), - __mul__ = interpindirect2app(W_UnicodeObject.descr_mul), - __rmul__ = interpindirect2app(W_UnicodeObject.descr_mul), + __add__ = interp2app(W_UnicodeObject.descr_add), + __mul__ = interp2app(W_UnicodeObject.descr_mul), + __rmul__ = interp2app(W_UnicodeObject.descr_mul), - __getitem__ = interpindirect2app(W_UnicodeObject.descr_getitem), - __getslice__ = interpindirect2app(W_UnicodeObject.descr_getslice), + __getitem__ = interp2app(W_UnicodeObject.descr_getitem), + __getslice__ = interp2app(W_UnicodeObject.descr_getslice), - capitalize = interpindirect2app(W_UnicodeObject.descr_capitalize), - center = interpindirect2app(W_UnicodeObject.descr_center), - count = interpindirect2app(W_UnicodeObject.descr_count), - decode = interpindirect2app(W_UnicodeObject.descr_decode), - encode = interpindirect2app(W_UnicodeObject.descr_encode), - expandtabs = interpindirect2app(W_UnicodeObject.descr_expandtabs), - find = interpindirect2app(W_UnicodeObject.descr_find), - rfind = interpindirect2app(W_UnicodeObject.descr_rfind), - index = interpindirect2app(W_UnicodeObject.descr_index), - rindex = interpindirect2app(W_UnicodeObject.descr_rindex), - isalnum = interpindirect2app(W_UnicodeObject.descr_isalnum), - isalpha = interpindirect2app(W_UnicodeObject.descr_isalpha), - isdigit = interpindirect2app(W_UnicodeObject.descr_isdigit), - islower = interpindirect2app(W_UnicodeObject.descr_islower), - isspace = interpindirect2app(W_UnicodeObject.descr_isspace), - istitle = interpindirect2app(W_UnicodeObject.descr_istitle), - isupper = interpindirect2app(W_UnicodeObject.descr_isupper), - join = interpindirect2app(W_UnicodeObject.descr_join), - ljust = interpindirect2app(W_UnicodeObject.descr_ljust), - rjust = interpindirect2app(W_UnicodeObject.descr_rjust), - lower = interpindirect2app(W_UnicodeObject.descr_lower), - partition = interpindirect2app(W_UnicodeObject.descr_partition), - rpartition = interpindirect2app(W_UnicodeObject.descr_rpartition), - replace = interpindirect2app(W_UnicodeObject.descr_replace), - split = interpindirect2app(W_UnicodeObject.descr_split), - rsplit = interpindirect2app(W_UnicodeObject.descr_rsplit), - splitlines = interpindirect2app(W_UnicodeObject.descr_splitlines), - startswith = interpindirect2app(W_UnicodeObject.descr_startswith), - endswith = interpindirect2app(W_UnicodeObject.descr_endswith), - strip = interpindirect2app(W_UnicodeObject.descr_strip), - lstrip = interpindirect2app(W_UnicodeObject.descr_lstrip), - rstrip = interpindirect2app(W_UnicodeObject.descr_rstrip), - swapcase = interpindirect2app(W_UnicodeObject.descr_swapcase), - title = interpindirect2app(W_UnicodeObject.descr_title), - translate = interpindirect2app(W_UnicodeObject.descr_translate), - upper = 
interpindirect2app(W_UnicodeObject.descr_upper), - zfill = interpindirect2app(W_UnicodeObject.descr_zfill), + capitalize = interp2app(W_UnicodeObject.descr_capitalize), + center = interp2app(W_UnicodeObject.descr_center), + count = interp2app(W_UnicodeObject.descr_count), + decode = interp2app(W_UnicodeObject.descr_decode), + encode = interp2app(W_UnicodeObject.descr_encode), + expandtabs = interp2app(W_UnicodeObject.descr_expandtabs), + find = interp2app(W_UnicodeObject.descr_find), + rfind = interp2app(W_UnicodeObject.descr_rfind), + index = interp2app(W_UnicodeObject.descr_index), + rindex = interp2app(W_UnicodeObject.descr_rindex), + isalnum = interp2app(W_UnicodeObject.descr_isalnum), + isalpha = interp2app(W_UnicodeObject.descr_isalpha), + isdigit = interp2app(W_UnicodeObject.descr_isdigit), + islower = interp2app(W_UnicodeObject.descr_islower), + isspace = interp2app(W_UnicodeObject.descr_isspace), + istitle = interp2app(W_UnicodeObject.descr_istitle), + isupper = interp2app(W_UnicodeObject.descr_isupper), + join = interp2app(W_UnicodeObject.descr_join), + ljust = interp2app(W_UnicodeObject.descr_ljust), + rjust = interp2app(W_UnicodeObject.descr_rjust), + lower = interp2app(W_UnicodeObject.descr_lower), + partition = interp2app(W_UnicodeObject.descr_partition), + rpartition = interp2app(W_UnicodeObject.descr_rpartition), + replace = interp2app(W_UnicodeObject.descr_replace), + split = interp2app(W_UnicodeObject.descr_split), + rsplit = interp2app(W_UnicodeObject.descr_rsplit), + splitlines = interp2app(W_UnicodeObject.descr_splitlines), + startswith = interp2app(W_UnicodeObject.descr_startswith), + endswith = interp2app(W_UnicodeObject.descr_endswith), + strip = interp2app(W_UnicodeObject.descr_strip), + lstrip = interp2app(W_UnicodeObject.descr_lstrip), + rstrip = interp2app(W_UnicodeObject.descr_rstrip), + swapcase = interp2app(W_UnicodeObject.descr_swapcase), + title = interp2app(W_UnicodeObject.descr_title), + translate = interp2app(W_UnicodeObject.descr_translate), + upper = interp2app(W_UnicodeObject.descr_upper), + zfill = interp2app(W_UnicodeObject.descr_zfill), format = interp2app(W_UnicodeObject.descr_format), __format__ = interp2app(W_UnicodeObject.descr__format__), From noreply at buildbot.pypy.org Thu Jul 25 19:00:45 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 19:00:45 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add '@specialize.argtype(0)' to each descr_*() method in StringMethods. Message-ID: <20130725170045.4330B1C13FC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65652:f280f4ca6565 Date: 2013-07-25 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/f280f4ca6565/ Log: Add '@specialize.argtype(0)' to each descr_*() method in StringMethods. 
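The hint comes from rpython.rlib.objectmodel: during annotation, specialize.argtype(0) makes the translator build one copy of the decorated method per concrete type of argument 0 (here: self), so the shared StringMethods bodies stay monomorphic for the bytes, unicode and bytearray subclasses instead of being annotated with their union. A minimal sketch of the decorator's effect, assuming the rpython package is importable; the Demo class and values are made up for illustration and are not part of the patch below:

    from rpython.rlib.objectmodel import specialize

    class Demo(object):
        def __init__(self, value):
            self.value = value

        @specialize.argtype(0)   # one specialized copy per concrete type of 'self'
        def double(self):
            return self.value + self.value

    print Demo('ab').double()   # 'abab' -- the str-specialized copy
    print Demo(21).double()     # 42     -- the int-specialized copy

Under plain CPython the decorator only tags the function; the actual specialization happens when the RPython annotator processes the call sites during translation.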
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -27,6 +27,7 @@ space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) + @specialize.argtype(0) def descr_eq(self, space, w_other): try: return space.newbool(self._val(space) == self._op_val(space, w_other)) @@ -42,6 +43,7 @@ return space.w_False raise + @specialize.argtype(0) def descr_ne(self, space, w_other): try: return space.newbool(self._val(space) != self._op_val(space, w_other)) @@ -57,6 +59,7 @@ return space.w_True raise + @specialize.argtype(0) def descr_lt(self, space, w_other): try: return space.newbool(self._val(space) < self._op_val(space, w_other)) @@ -64,6 +67,7 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented + @specialize.argtype(0) def descr_le(self, space, w_other): try: return space.newbool(self._val(space) <= self._op_val(space, w_other)) @@ -71,6 +75,7 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented + @specialize.argtype(0) def descr_gt(self, space, w_other): try: return space.newbool(self._val(space) > self._op_val(space, w_other)) @@ -78,6 +83,7 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented + @specialize.argtype(0) def descr_ge(self, space, w_other): try: return space.newbool(self._val(space) >= self._op_val(space, w_other)) @@ -85,12 +91,15 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented + @specialize.argtype(0) def descr_len(self, space): return space.wrap(self._len()) + @specialize.argtype(0) #def descr_iter(self, space): # pass + @specialize.argtype(0) def descr_contains(self, space, w_sub): from pypy.objspace.std.bytearrayobject import W_BytearrayObject if (isinstance(self, W_BytearrayObject) and @@ -105,9 +114,11 @@ return space.w_False return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) + @specialize.argtype(0) def descr_add(self, space, w_other): return self._new(self._val(space) + self._op_val(space, w_other)) + @specialize.argtype(0) def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) @@ -121,6 +132,7 @@ return self._new(self._val(space)[0] * times) return self._new(self._val(space) * times) + @specialize.argtype(0) def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): selfvalue = self._val(space) @@ -149,6 +161,7 @@ #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) + @specialize.argtype(0) def descr_getslice(self, space, w_start, w_stop): selfvalue = self._val(space) start, stop = normalize_simple_slice(space, len(selfvalue), w_start, @@ -158,6 +171,7 @@ else: return self._sliced(space, selfvalue, start, stop, self) + @specialize.argtype(0) def descr_capitalize(self, space): value = self._val(space) if len(value) == 0: @@ -170,6 +184,7 @@ return self._new(builder.build()) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + @specialize.argtype(0) def descr_center(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -187,10 +202,12 @@ return self._new(u_centered) + @specialize.argtype(0) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) return wrapint(space, value.count(self._op_val(space, w_sub), start, end)) + @specialize.argtype(0) def descr_decode(self, space, w_encoding=None, w_errors=None): from 
pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ unicode_from_string, decode_object @@ -199,6 +216,7 @@ return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) + @specialize.argtype(0) def descr_encode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ encode_object @@ -206,6 +224,7 @@ return encode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) + @specialize.argtype(0) def descr_expandtabs(self, space, tabsize=8): value = self._val(space) if not value: @@ -248,16 +267,19 @@ return distance + @specialize.argtype(0) def descr_find(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) return space.wrap(res) + @specialize.argtype(0) def descr_rfind(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) return space.wrap(res) + @specialize.argtype(0) def descr_index(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) @@ -267,6 +289,7 @@ return space.wrap(res) + @specialize.argtype(0) def descr_rindex(self, space, w_sub, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) @@ -296,15 +319,19 @@ return space.w_False return space.w_True + @specialize.argtype(0) def descr_isalnum(self, space): return self._is_generic(space, '_isalnum') + @specialize.argtype(0) def descr_isalpha(self, space): return self._is_generic(space, '_isalpha') + @specialize.argtype(0) def descr_isdigit(self, space): return self._is_generic(space, '_isdigit') + @specialize.argtype(0) def descr_islower(self, space): v = self._val(space) if len(v) == 1: @@ -318,9 +345,11 @@ cased = True return space.newbool(cased) + @specialize.argtype(0) def descr_isspace(self, space): return self._is_generic(space, '_isspace') + @specialize.argtype(0) def descr_istitle(self, space): input = self._val(space) cased = False @@ -342,6 +371,7 @@ return space.newbool(cased) + @specialize.argtype(0) def descr_isupper(self, space): v = self._val(space) if len(v) == 1: @@ -355,6 +385,7 @@ cased = True return space.newbool(cased) + @specialize.argtype(0) def descr_join(self, space, w_list): #l = space.listview_str(w_list) #if l is not None: @@ -405,6 +436,7 @@ assert False, 'unreachable' @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + @specialize.argtype(0) def descr_ljust(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -420,6 +452,7 @@ return self._new(value) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) + @specialize.argtype(0) def descr_rjust(self, space, width, w_fillchar): value = self._val(space) fillchar = self._op_val(space, w_fillchar) @@ -434,6 +467,7 @@ return self._new(value) + @specialize.argtype(0) def descr_lower(self, space): value = self._val(space) builder = self._builder(len(value)) @@ -441,6 +475,7 @@ builder.append(self._lower(value[i])) return self._new(builder.build()) + @specialize.argtype(0) def descr_partition(self, space, w_sub): value = self._val(space) sub = self._op_val(space, w_sub) @@ -458,6 +493,7 @@ [self._sliced(space, value, 0, pos, value), w_sub, 
self._sliced(space, value, pos+len(sub), len(value), value)]) + @specialize.argtype(0) def descr_rpartition(self, space, w_sub): value = self._val(space) sub = self._op_val(space, w_sub) @@ -476,6 +512,7 @@ self._sliced(space, value, pos+len(sub), len(value), value)]) @unwrap_spec(count=int) + @specialize.argtype(0) def descr_replace(self, space, w_old, w_new, count=-1): input = self._val(space) sub = self._op_val(space, w_old) @@ -488,6 +525,7 @@ return self._new(res) @unwrap_spec(maxsplit=int) + @specialize.argtype(0) def descr_split(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) @@ -528,6 +566,7 @@ return self._newlist_unwrapped(space, res) @unwrap_spec(maxsplit=int) + @specialize.argtype(0) def descr_rsplit(self, space, w_sep=None, maxsplit=-1): res = [] value = self._val(space) @@ -571,6 +610,7 @@ return self._newlist_unwrapped(space, res) @unwrap_spec(keepends=bool) + @specialize.argtype(0) def descr_splitlines(self, space, keepends=False): data = self._val(space) selflen = len(data) @@ -594,6 +634,7 @@ strs.append(data[j:len(data)]) return self._newlist_unwrapped(space, strs) + @specialize.argtype(0) def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end, True) @@ -607,6 +648,7 @@ def _startswith(self, space, value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) + @specialize.argtype(0) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end, True) @@ -660,21 +702,25 @@ assert rpos >= lpos # annotator hint, don't remove return self._sliced(space, value, lpos, rpos, self) + @specialize.argtype(0) def descr_strip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=1, right=1) return self._strip(space, w_chars, left=1, right=1) + @specialize.argtype(0) def descr_lstrip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=1, right=0) return self._strip(space, w_chars, left=1, right=0) + @specialize.argtype(0) def descr_rstrip(self, space, w_chars=None): if space.is_none(w_chars): return self._strip_none(space, left=0, right=1) return self._strip(space, w_chars, left=0, right=1) + @specialize.argtype(0) def descr_swapcase(self, space): selfvalue = self._val(space) builder = self._builder(len(selfvalue)) @@ -688,6 +734,7 @@ builder.append(ch) return self._new(builder.build()) + @specialize.argtype(0) def descr_title(self, space): selfval = self._val(space) if len(selfval) == 0: @@ -708,6 +755,7 @@ # for bytes and bytearray, overridden by unicode @unwrap_spec(w_deletechars=WrappedDefault('')) + @specialize.argtype(0) def descr_translate(self, space, w_table, w_deletechars): if space.is_w(w_table, space.w_None): table = self.DEFAULT_NOOP_TABLE @@ -734,6 +782,7 @@ buf.append(table[ord(char)]) return self._new(buf.build()) + @specialize.argtype(0) def descr_upper(self, space): value = self._val(space) builder = self._builder(len(value)) @@ -742,6 +791,7 @@ return self._new(builder.build()) @unwrap_spec(width=int) + @specialize.argtype(0) def descr_zfill(self, space, width): selfval = self._val(space) if len(selfval) == 0: @@ -762,5 +812,6 @@ builder.append_slice(selfval, start, len(selfval)) return self._new(builder.build()) + @specialize.argtype(0) def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) From noreply at buildbot.pypy.org Thu Jul 25 
19:03:36 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 19:03:36 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Inline unicode_from_object2(). Message-ID: <20130725170336.082101C13FC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65653:4b34bd28d384 Date: 2013-07-25 19:01 +0200 http://bitbucket.org/pypy/pypy/changeset/4b34bd28d384/ Log: Inline unicode_from_object2(). diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -79,8 +79,9 @@ return self._value def _op_val(self, space, w_other): - return unicode_from_object2(space, w_other)._value - #return w_other._value + if isinstance(w_other, W_UnicodeObject): + return w_other._value + return unicode_from_encoded_object(space, w_other, None, "strict")._value def _chr(self, char): return unicode(char) @@ -334,14 +335,6 @@ return w_res return unicode_from_encoded_object(space, w_res, None, "strict") -# XXX refactor / rename / share with unicode_from_object -def unicode_from_object2(space, w_obj): - if space.is_w(space.type(w_obj), space.w_unicode): - return w_obj - elif isinstance(w_obj, W_UnicodeObject): - return W_UnicodeObject(w_obj._value) - return unicode_from_encoded_object(space, w_obj, None, "strict") - def unicode_from_string(space, w_str): # this is a performance and bootstrapping hack encoding = getdefaultencoding(space) From noreply at buildbot.pypy.org Thu Jul 25 19:29:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 19:29:03 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: in-progress Message-ID: <20130725172903.12C531C101E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r440:0c2bd2af9b0f Date: 2013-07-25 18:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/0c2bd2af9b0f/ Log: in-progress diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -237,8 +237,13 @@ obj->h_tid |= GCFLAG_MOVED; /* copy the object's content */ - dprintf(("copy %p over %p\n", obj, id_copy)); - memcpy(id_copy + 1, obj + 1, stmgc_size(obj) - sizeof(struct stm_object_s)); + size_t objsize; + if (obj->h_tid & GCFLAG_STUB) + objsize = sizeof(struct stm_stub_s); + else + objsize = stmgc_size(obj); + dprintf(("copy %p over %p (%ld bytes)\n", obj, id_copy, objsize)); + memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); /* copy the object's h_revision number */ id_copy->h_revision = obj->h_revision; @@ -346,7 +351,8 @@ /* return this original */ original->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, original); + if (!(original->h_tid & GCFLAG_STUB)) + gcptrlist_insert(&objects_to_trace, original); return original; } diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -303,19 +303,14 @@ major_collect() check_prebuilt(p1) check_free_old(p2) - check_not_free(p3) # XXX replace with p1 + check_free_old(p3) -def test_prebuilt_version_2_copy_over_prebuilt(): +def test_prebuilt_with_hash(): p1 = lib.pseudoprebuilt_with_hash(HDR, 42 + HDR, 99) p2 = oalloc(HDR); make_public(p2) p3 = oalloc(HDR); make_public(p3) delegate(p1, p2) - delegate_original(p1, p2) delegate(p2, p3) - delegate_original(p1, p3) - # added by delegate, remove, otherwise - # major_collect will not copy over prebuilt p1: - p1.h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE major_collect() check_prebuilt(p1) assert 
lib.stm_hash(p1) == 99 From noreply at buildbot.pypy.org Thu Jul 25 19:29:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 19:29:04 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix Message-ID: <20130725172904.498AE1C101E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r441:102fc9863345 Date: 2013-07-25 18:13 +0200 http://bitbucket.org/pypy/stmgc/changeset/102fc9863345/ Log: Fix diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -540,7 +540,8 @@ if (obj->h_tid & GCFLAG_MOVED) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - obj = (gcptr)obj->h_revision; + assert(IS_POINTER(obj->h_original)); + obj = (gcptr)obj->h_original; items[i] = obj; } else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { From noreply at buildbot.pypy.org Thu Jul 25 19:29:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 25 Jul 2013 19:29:05 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Use the dbgmem.c logic for the shadowstack too Message-ID: <20130725172905.71BB81C101E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r442:5e88d98b36de Date: 2013-07-25 19:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/5e88d98b36de/ Log: Use the dbgmem.c logic for the shadowstack too diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -52,7 +52,7 @@ static void init_shadowstack(void) { struct tx_descriptor *d = thread_descriptor; - d->shadowstack = malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); + d->shadowstack = stm_malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); if (!d->shadowstack) { stm_fatalerror("out of memory: shadowstack\n"); } @@ -68,7 +68,7 @@ assert(x == END_MARKER_ON); assert(stm_shadowstack == d->shadowstack); stm_shadowstack = NULL; - free(d->shadowstack); + stm_free(d->shadowstack, sizeof(gcptr) * LENGTH_SHADOW_STACK); } void stm_set_max_aborts(int max_aborts) From noreply at buildbot.pypy.org Thu Jul 25 19:40:34 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 19:40:34 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove use of unicode.is* which isn't supported in RPython. Message-ID: <20130725174034.51AAB1C101E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65654:cfc74ab95eda Date: 2013-07-25 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/cfc74ab95eda/ Log: Remove use of unicode.is* which isn't supported in RPython. 
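The idea behind this change, as a minimal sketch (the class and helper names below are illustrative, and the exact unicodedb import name is an assumption; the diff that follows only shows the unicodedb.*(ord(ch)) calls): the shared algorithms in stringmethods.py call a per-type _is*() helper instead of unicode.is*(), and only the unicode implementation consults the RPython unicode database.

    from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb  # import name assumed

    class StringMethods(object):
        def lstripped(self, value):
            # shared algorithm: never calls value[i].isspace() directly,
            # only the per-type helper, so it stays RPython-translatable
            i = 0
            while i < len(value) and self._isspace(value[i]):
                i += 1
            return value[i:]

    class BytesOps(StringMethods):
        def _isspace(self, ch):
            return ch.isspace()          # str.isspace() is supported by RPython

    class UnicodeOps(StringMethods):
        def _isspace(self, ch):
            # unicode.isspace() is not supported by RPython; classify the
            # character by code point through the unicode database instead
            return unicodedb.isspace(ord(ch))

With this split, BytesOps().lstripped("  abc") and UnicodeOps().lstripped(u"  abc") run the same loop, and only the one-character helper differs per type.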
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -336,12 +336,12 @@ v = self._val(space) if len(v) == 1: c = v[0] - return space.newbool(c.islower()) + return space.newbool(self._islower(c)) cased = False for idx in range(len(v)): - if v[idx].isupper(): + if self._isupper(v[idx]): return space.w_False - elif not cased and v[idx].islower(): + elif not cased and self._islower(v[idx]): cased = True return space.newbool(cased) @@ -357,12 +357,12 @@ for pos in range(0, len(input)): ch = input[pos] - if ch.isupper(): + if self._isupper(ch): if previous_is_cased: return space.w_False previous_is_cased = True cased = True - elif ch.islower(): + elif self._islower(ch): if not previous_is_cased: return space.w_False cased = True @@ -376,12 +376,12 @@ v = self._val(space) if len(v) == 1: c = v[0] - return space.newbool(c.isupper()) + return space.newbool(self._isupper(c)) cased = False for idx in range(len(v)): - if v[idx].islower(): + if self._islower(v[idx]): return space.w_False - elif not cased and v[idx].isupper(): + elif not cased and self._isupper(v[idx]): cased = True return space.newbool(cased) @@ -535,7 +535,7 @@ while True: # find the beginning of the next word while i < length: - if not value[i].isspace(): + if not self._isspace(value[i]): break # found i += 1 else: @@ -546,7 +546,7 @@ j = length # take all the rest of the string else: j = i + 1 - while j < length and not value[j].isspace(): + while j < length and not self._isspace(value[j]): j += 1 maxsplit -= 1 # NB. if it's already < 0, it stays < 0 @@ -575,7 +575,7 @@ while True: # starting from the end, find the end of the next word while i >= 0: - if not value[i].isspace(): + if not self._isspace(value[i]): break # found i -= 1 else: @@ -587,7 +587,7 @@ j = -1 # take all the rest of the string else: j = i - 1 - while j >= 0 and not value[j].isspace(): + while j >= 0 and not self._isspace(value[j]): j -= 1 maxsplit -= 1 # NB. if it's already < 0, it stays < 0 @@ -692,11 +692,11 @@ if left: #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) - while lpos < rpos and value[lpos].isspace(): + while lpos < rpos and self._isspace(value[lpos]): lpos += 1 if right: - while rpos > lpos and value[rpos - 1].isspace(): + while rpos > lpos and self._isspace(value[rpos - 1]): rpos -= 1 assert rpos >= lpos # annotator hint, don't remove diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -89,25 +89,25 @@ _builder = UnicodeBuilder def _isupper(self, ch): - return ch.isupper() + return unicodedb.isupper(ord(ch)) def _islower(self, ch): - return ch.islower() + return unicodedb.islower(ord(ch)) def _istitle(self, ch): - return ch.istitle() + return unicodedb.istitle(ord(ch)) def _isspace(self, ch): - return ch.isspace() + return unicodedb.isspace(ord(ch)) def _isalpha(self, ch): - return ch.isalpha() + return unicodedb.isalpha(ord(ch)) def _isalnum(self, ch): - return ch.isalnum() + return unicodedb.isalnum(ord(ch)) def _isdigit(self, ch): - return ch.isdigit() + return unicodedb.isdigit(ord(ch)) def _iscased(self, ch): return unicodedb.iscased(ord(ch)) From noreply at buildbot.pypy.org Thu Jul 25 19:40:35 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 19:40:35 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Make the annotator happy. 
Message-ID: <20130725174035.9361A1C101E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65655:e38a40e646e8 Date: 2013-07-25 19:38 +0200 http://bitbucket.org/pypy/pypy/changeset/e38a40e646e8/ Log: Make the annotator happy. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -29,7 +29,7 @@ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): - return W_BytearrayObject(value) + return W_BytearrayObject(list(value)) def _len(self): return len(self.data) @@ -41,7 +41,8 @@ return space.bufferstr_new_w(w_other) def _chr(self, char): - return str(char) + assert len(char) == 1 + return str(char)[0] _builder = StringBuilder diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -83,7 +83,8 @@ #return w_other._value def _chr(self, char): - return str(char) + assert len(char) == 1 + return str(char)[0] _builder = StringBuilder diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -84,7 +84,8 @@ return unicode_from_encoded_object(space, w_other, None, "strict")._value def _chr(self, char): - return unicode(char) + assert len(char) == 1 + return unicode(char)[0] _builder = UnicodeBuilder From noreply at buildbot.pypy.org Thu Jul 25 19:56:21 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 19:56:21 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: hg rm pypy/module/clr/ Message-ID: <20130725175621.819641C101E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65656:9483b6d294d3 Date: 2013-07-25 18:10 +0100 http://bitbucket.org/pypy/pypy/changeset/9483b6d294d3/ Log: hg rm pypy/module/clr/ diff --git a/pypy/module/clr/__init__.py b/pypy/module/clr/__init__.py deleted file mode 100644 --- a/pypy/module/clr/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Package initialisation -from pypy.interpreter.mixedmodule import MixedModule - -import boxing_rules # with side effects - -class Module(MixedModule): - """CLR module""" - - appleveldefs = { - 'dotnetimporter': 'app_importer.importer' - } - - interpleveldefs = { - '_CliObject_internal': 'interp_clr.W_CliObject', - 'call_staticmethod': 'interp_clr.call_staticmethod', - 'load_cli_class': 'interp_clr.load_cli_class', - 'get_assemblies_info': 'interp_clr.get_assemblies_info', - 'AddReferenceByPartialName': 'interp_clr.AddReferenceByPartialName', - } - - def startup(self, space): - self.space.appexec([self], """(clr_module): - import sys - clr_module.get_assemblies_info() # load info for std assemblies - sys.meta_path.append(clr_module.dotnetimporter()) - """) diff --git a/pypy/module/clr/app_clr.py b/pypy/module/clr/app_clr.py deleted file mode 100644 --- a/pypy/module/clr/app_clr.py +++ /dev/null @@ -1,204 +0,0 @@ -# NOT_RPYTHON - -class StaticMethodWrapper(object): - __slots__ = ('class_name', 'meth_name',) - - def __init__(self, class_name, meth_name): - self.class_name = class_name - self.meth_name = meth_name - - def __call__(self, *args): - import clr - return clr.call_staticmethod(self.class_name, self.meth_name, args) - - def __repr__(self): - return '' % (self.class_name, self.meth_name) - - -class MethodWrapper(object): - __slots__ = ('meth_name',) - - def __init__(self, 
meth_name): - self.meth_name = meth_name - - def __get__(self, obj, type_): - if obj is None: - return UnboundMethod(type_, self.meth_name) - else: - return BoundMethod(self.meth_name, obj) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, repr(self.meth_name)) - - -class UnboundMethod(object): - __slots__ = ('im_class', 'im_name') - - def __init__(self, im_class, im_name): - self.im_class = im_class - self.im_name = im_name - - def __raise_TypeError(self, thing): - raise TypeError, 'unbound method %s() must be called with %s ' \ - 'instance as first argument (got %s instead)' % \ - (self.im_name, self.im_class.__cliclass__, thing) - - def __call__(self, *args): - if len(args) == 0: - self.__raise_TypeError('nothing') - im_self = args[0] - if not isinstance(im_self, self.im_class): - self.__raise_TypeError('%s instance' % im_self.__class__.__name__) - return im_self.__cliobj__.call_method(self.im_name, args, 1) # ignore the first arg - - def __repr__(self): - return '' % (self.im_class.__cliclass__, self.im_name) - - -class BoundMethod(object): - __slots__ = ('im_name', 'im_self') - - def __init__(self, im_name, im_self): - self.im_name = im_name - self.im_self = im_self - - def __call__(self, *args): - return self.im_self.__cliobj__.call_method(self.im_name, args) - - def __repr__(self): - return '' % (self.im_self.__class__.__cliclass__, - self.im_name, - self.im_self) - -class StaticProperty(object): - def __init__(self, fget=None, fset=None): - self.fget = fget - self.fset = fset - - def __get__(self, obj, type_): - return self.fget() - -def _qualify(t): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - return '%s, %s' % (t, mscorlib) - -class MetaGenericCliClassWrapper(type): - _cli_types = { - int: _qualify('System.Int32'), - str: _qualify('System.String'), - bool: _qualify('System.Boolean'), - float: _qualify('System.Double'), - } - _System_Object = _qualify('System.Object') - - def _cli_name(cls, ttype): - if isinstance(ttype, MetaCliClassWrapper): - return '[%s]' % ttype.__fullyqualifiedname__ - else: - return '[%s]' % cls._cli_types.get(ttype, cls._System_Object) - - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - - def __getitem__(cls, type_or_tuple): - import clr - if isinstance(type_or_tuple, tuple): - types = type_or_tuple - else: - types = (type_or_tuple,) - namespace, generic_class = cls.__cliclass__.rsplit('.', 1) - generic_params = [cls._cli_name(t) for t in types] - instance_class = '%s[%s]' % (generic_class, ','.join(generic_params)) - try: - return clr.load_cli_class(cls.__assemblyname__, namespace, instance_class) - except ImportError: - raise TypeError, "Cannot load type %s.%s" % (namespace, instance_class) - -class MetaCliClassWrapper(type): - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - -class CliClassWrapper(object): - __slots__ = ('__cliobj__',) - - def __init__(self, *args): - import clr - self.__cliobj__ = clr._CliObject_internal(self.__fullyqualifiedname__, args) - - -class IEnumeratorWrapper(object): - def __init__(self, enumerator): - self.__enumerator__ = enumerator - - def __iter__(self): - return self - - def next(self): - if not self.__enumerator__.MoveNext(): - raise StopIteration - return self.__enumerator__.Current - -# this method need 
to be attached only to classes that implements IEnumerable (see build_wrapper) -def __iter__(self): - return IEnumeratorWrapper(self.GetEnumerator()) - -def wrapper_from_cliobj(cls, cliobj): - obj = cls.__new__(cls) - obj.__cliobj__ = cliobj - return obj - -def build_wrapper(namespace, classname, assemblyname, - staticmethods, methods, properties, indexers, - hasIEnumerable, isClassGeneric): - fullname = '%s.%s' % (namespace, classname) - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - d = {'__cliclass__': fullname, - '__fullyqualifiedname__': assembly_qualified_name, - '__assemblyname__': assemblyname, - '__module__': namespace} - for name in staticmethods: - d[name] = StaticMethodWrapper(assembly_qualified_name, name) - for name in methods: - d[name] = MethodWrapper(name) - - # check if IEnumerable is implemented - if hasIEnumerable: - d['__iter__'] = __iter__ - - assert len(indexers) <= 1 - if indexers: - name, getter, setter, is_static = indexers[0] - assert not is_static - if getter: - d['__getitem__'] = d[getter] - if setter: - d['__setitem__'] = d[setter] - if isClassGeneric: - cls = MetaGenericCliClassWrapper(classname, (CliClassWrapper,), d) - else: - cls = MetaCliClassWrapper(classname, (CliClassWrapper,), d) - - # we must add properties *after* the class has been created - # because we need to store UnboundMethods as getters and setters - for (name, getter, setter, is_static) in properties: - fget = None - fset = None - if getter: - fget = getattr(cls, getter) - if setter: - fset = getattr(cls, setter) - if is_static: - prop = StaticProperty(fget, fset) - else: - prop = property(fget, fset) - setattr(cls, name, prop) - - return cls diff --git a/pypy/module/clr/app_importer.py b/pypy/module/clr/app_importer.py deleted file mode 100644 --- a/pypy/module/clr/app_importer.py +++ /dev/null @@ -1,85 +0,0 @@ -"""NOT_RPYTHON""" - -# Meta hooks are called at the start of Import Processing -# Meta hooks can override the sys.path, frozen modules , built-in modules -# To register a Meta Hook simply add importer object to sys.meta_path - -import sys -import types - -class importer(object): - ''' - If the importer is installed on sys.meta_path, it will - receive a second argument, which is None for a top-level module, or - package.__path__ for submodules or subpackages - - It should return a loader object if the module was found, or None if it wasn\'t. - If find_module() raises an exception, the caller will abort the import. - When importer.find_module("spam.eggs.ham") is called, "spam.eggs" has already - been imported and added to sys.modules. - ''' - - def find_module(self, fullname, path=None): - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in namespaces or fullname in classes: - return self # fullname is a .NET Module - else: - return None # fullname is not a .NET Module - - def load_module(self, fullname): - ''' - The load_module() must fulfill the following *before* it runs any code: - Note that the module object *must* be in sys.modules before the - loader executes the module code. - - A If 'fullname' exists in sys.modules, the loader must use that - else the loader must create a new module object and add it to sys.modules. - - module = sys.modules.setdefault(fullname, new.module(fullname)) - - B The __file__ attribute must be set. String say "" - - C The __name__ attribute must be set. If one uses - imp.new_module() then the attribute is set automatically. - - D If it\'s a package, the __path__ variable must be set. 
This must - be a list, but may be empty if __path__ has no further - significance to the importer (more on this later). - - E It should add a __loader__ attribute to the module, set to the loader object. - - ''' - # If it is a call for a Class then return with the Class reference - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in classes: - assemblyname = classes[fullname] - fullname = generics.get(fullname, fullname) - ns, classname = fullname.rsplit('.', 1) - sys.modules[fullname] = clr.load_cli_class(assemblyname, ns, classname) - else: # if not a call for actual class (say for namespaces) assign an empty module - if fullname not in sys.modules: - mod = CLRModule(fullname) - mod.__file__ = "<%s>" % self.__class__.__name__ - mod.__loader__ = self - mod.__name__ = fullname - # add it to the modules dict - sys.modules[fullname] = mod - - # if it is a PACKAGE then we are to initialize the __path__ for the module - # we won't deal with Packages here - return sys.modules[fullname] - -class CLRModule(types.ModuleType): - def __getattr__(self, name): - if not name.startswith("__"): - try: - iname = self.__name__ + '.' + name - __import__(iname) - except ImportError: - pass - return types.ModuleType.__getattribute__(self, name) - diff --git a/pypy/module/clr/assemblyname.py b/pypy/module/clr/assemblyname.py deleted file mode 100644 --- a/pypy/module/clr/assemblyname.py +++ /dev/null @@ -1,2 +0,0 @@ -mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' -System = 'System, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py deleted file mode 100644 --- a/pypy/module/clr/boxing_rules.py +++ /dev/null @@ -1,53 +0,0 @@ -from rpython.tool.pairtype import extendabletype -from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.boolobject import W_BoolObject -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.stringobject import W_StringObject -from rpython.translator.cli.dotnet import box - -class __extend__(W_Root): - __metaclass__ = extendabletype - - def tocli(self): - return box(self) - -class __extend__(W_IntObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.intval) - -class __extend__(W_FloatObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.floatval) - -class __extend__(W_NoneObject): - __metaclass__ = extendabletype - - def tocli(self): - return None - -class __extend__(W_BoolObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.boolval) - -class __extend__(W_StringObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self._value) - -##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -##from rpython.rlib.nonconst import NonConstant - -##class __extend__(W_Object_Fake): -## __metaclass__ = extendabletype - -## def tocli(self): -## return NonConstant(None) diff --git a/pypy/module/clr/interp_clr.py b/pypy/module/clr/interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/interp_clr.py +++ /dev/null @@ -1,364 +0,0 @@ -import os.path -from pypy.module.clr import assemblyname -from pypy.interpreter.baseobjspace import W_Root, W_Root -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, 
unwrap_spec, ApplevelClass -from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.dotnet import CLR, box, unbox, NativeException, native_exc,\ - new_array, init_array, typeof - -System = CLR.System -Assembly = CLR.System.Reflection.Assembly -TargetInvocationException = NativeException(CLR.System.Reflection.TargetInvocationException) -AmbiguousMatchException = NativeException(CLR.System.Reflection.AmbiguousMatchException) - -def get_method(space, b_type, name, b_paramtypes): - try: - method = b_type.GetMethod(name, b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - if method is None: - msg = 'No overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - return method - -def get_constructor(space, b_type, b_paramtypes): - try: - ctor = b_type.GetConstructor(b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple constructors could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - if ctor is None: - msg = 'No overloads for constructor could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - return ctor - -def rewrap_args(space, w_args, startfrom): - args = space.unpackiterable(w_args) - paramlen = len(args)-startfrom - b_args = new_array(System.Object, paramlen) - b_paramtypes = new_array(System.Type, paramlen) - for i in range(startfrom, len(args)): - j = i-startfrom - b_obj = py2cli(space, args[i]) - b_args[j] = b_obj - if b_obj is None: - b_paramtypes[j] = typeof(System.Object) # we really can't be more precise - else: - b_paramtypes[j] = b_obj.GetType() # XXX: potentially inefficient - return b_args, b_paramtypes - - -def call_method(space, b_obj, b_type, name, w_args, startfrom): - b_args, b_paramtypes = rewrap_args(space, w_args, startfrom) - b_meth = get_method(space, b_type, name, b_paramtypes) - try: - # for an explanation of the box() call, see the log message for revision 35167 - b_res = box(b_meth.Invoke(b_obj, b_args)) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - if b_meth.get_ReturnType().get_Name() == 'Void': - return space.w_None - else: - return cli2py(space, b_res) - - at unwrap_spec(typename=str, methname=str) -def call_staticmethod(space, typename, methname, w_args): - """ - Call a .NET static method. - - Parameters: - - - typename: the fully qualified .NET name of the class - containing the method (e.g. ``System.Math``) - - - methname: the name of the static method to call (e.g. ``Abs``) - - - args: a list containing the arguments to be passed to the - method. - """ - b_type = System.Type.GetType(typename) # XXX: cache this! - return call_method(space, None, b_type, methname, w_args, 0) - -def py2cli(space, w_obj): - try: - cliobj = space.getattr(w_obj, space.wrap('__cliobj__')) - except OperationError, e: - if e.match(space, space.w_AttributeError): - # it hasn't got a __cloobj__ - return w_obj.tocli() - else: - raise - else: - if isinstance(cliobj, W_CliObject): - return cliobj.b_obj # unwrap it! - else: - # this shouldn't happen! 
Fallback to the default impl - return w_obj.tocli() - -def cli2py(space, b_obj): - # TODO: support other types and find the most efficient way to - # select the correct case - if b_obj is None: - return space.w_None - - w_obj = unbox(b_obj, W_Root) - if w_obj is not None: - return w_obj # it's already a wrapped object! - - b_type = b_obj.GetType() - if b_type == typeof(System.Int32): - intval = unbox(b_obj, ootype.Signed) - return space.wrap(intval) - elif b_type == typeof(System.Double): - floatval = unbox(b_obj, ootype.Float) - return space.wrap(floatval) - elif b_type == typeof(System.Boolean): - boolval = unbox(b_obj, ootype.Bool) - return space.wrap(boolval) - elif b_type == typeof(System.String): - strval = unbox(b_obj, ootype.String) - return space.wrap(strval) - else: - namespace, classname = split_fullname(b_type.ToString()) - assemblyname = b_type.get_Assembly().get_FullName() - w_cls = load_cli_class(space, assemblyname, namespace, classname) - cliobj = W_CliObject(space, b_obj) - return wrapper_from_cliobj(space, w_cls, cliobj) - -def split_fullname(name): - lastdot = name.rfind('.') - if lastdot < 0: - return '', name - return name[:lastdot], name[lastdot+1:] - -def wrap_list_of_tuples(space, lst): - list_w = [] - for (a,b,c,d) in lst: - items_w = [space.wrap(a), space.wrap(b), space.wrap(c), space.wrap(d)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_pairs(space, lst): - list_w = [] - for (a,b) in lst: - items_w = [space.wrap(a), space.wrap(b)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_strings(space, lst): - list_w = [space.wrap(s) for s in lst] - return space.newlist(list_w) - -def get_methods(space, b_type): - methods = [] - staticmethods = [] - b_methodinfos = b_type.GetMethods() - for i in range(len(b_methodinfos)): - b_meth = b_methodinfos[i] - if b_meth.get_IsPublic(): - if b_meth.get_IsStatic(): - staticmethods.append(str(b_meth.get_Name())) - else: - methods.append(str(b_meth.get_Name())) - w_staticmethods = wrap_list_of_strings(space, staticmethods) - w_methods = wrap_list_of_strings(space, methods) - return w_staticmethods, w_methods - -def get_properties(space, b_type): - properties = [] - indexers = {} - b_propertyinfos = b_type.GetProperties() - for i in range(len(b_propertyinfos)): - b_prop = b_propertyinfos[i] - get_name = None - set_name = None - is_static = False - if b_prop.get_CanRead(): - get_meth = b_prop.GetGetMethod() - get_name = get_meth.get_Name() - is_static = get_meth.get_IsStatic() - if b_prop.get_CanWrite(): - set_meth = b_prop.GetSetMethod() - if set_meth: - set_name = set_meth.get_Name() - is_static = set_meth.get_IsStatic() - b_indexparams = b_prop.GetIndexParameters() - if len(b_indexparams) == 0: - properties.append((b_prop.get_Name(), get_name, set_name, is_static)) - else: - indexers[b_prop.get_Name(), get_name, set_name, is_static] = None - w_properties = wrap_list_of_tuples(space, properties) - w_indexers = wrap_list_of_tuples(space, indexers.keys()) - return w_properties, w_indexers - -class _CliClassCache: - def __init__(self): - self.cache = {} - - def put(self, fullname, cls): - assert fullname not in self.cache - self.cache[fullname] = cls - - def get(self, fullname): - return self.cache.get(fullname, None) -CliClassCache = _CliClassCache() - -class _AssembliesInfo: - w_namespaces = None - w_classes = None - w_generics = None - w_info = None # a tuple containing (w_namespaces, w_classes, w_generics) -AssembliesInfo = _AssembliesInfo() - 
-def save_info_for_assembly(space, b_assembly): - info = AssembliesInfo - b_types = b_assembly.GetTypes() - w_assemblyName = space.wrap(b_assembly.get_FullName()) - for i in range(len(b_types)): - b_type = b_types[i] - namespace = b_type.get_Namespace() - fullname = b_type.get_FullName() - if '+' in fullname: - # it's an internal type, skip it - continue - if namespace is not None: - # builds all possible sub-namespaces - # (e.g. 'System', 'System.Windows', 'System.Windows.Forms') - chunks = namespace.split(".") - temp_name = chunks[0] - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - for chunk in chunks[1:]: - temp_name += "."+chunk - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - if b_type.get_IsGenericType(): - index = fullname.rfind("`") - assert index >= 0 - pyName = fullname[0:index] - space.setitem(info.w_classes, space.wrap(pyName), w_assemblyName) - space.setitem(info.w_generics, space.wrap(pyName), space.wrap(fullname)) - else: - space.setitem(info.w_classes, space.wrap(fullname), w_assemblyName) - - -def save_info_for_std_assemblies(space): - # in theory we should use Assembly.Load, but it doesn't work with - # pythonnet because it thinks it should use the Load(byte[]) overload - b_mscorlib = Assembly.LoadWithPartialName(assemblyname.mscorlib) - b_System = Assembly.LoadWithPartialName(assemblyname.System) - save_info_for_assembly(space, b_mscorlib) - save_info_for_assembly(space, b_System) - -def get_assemblies_info(space): - info = AssembliesInfo - if info.w_info is None: - info.w_namespaces = space.newdict() - info.w_classes = space.newdict() - info.w_generics = space.newdict() - info.w_info = space.newtuple([info.w_namespaces, info.w_classes, info.w_generics]) - save_info_for_std_assemblies(space) - return info.w_info - -#_______________________________________________________________________________ -# AddReference* methods - -# AddReference', 'AddReferenceByName', 'AddReferenceByPartialName', 'AddReferenceToFile', 'AddReferenceToFileAndPath' - - at unwrap_spec(name=str) -def AddReferenceByPartialName(space, name): - b_assembly = Assembly.LoadWithPartialName(name) - if b_assembly is not None: - save_info_for_assembly(space, b_assembly) - - - at unwrap_spec(assemblyname=str, namespace=str, classname=str) -def load_cli_class(space, assemblyname, namespace, classname): - """ - Load the given .NET class into the PyPy interpreter and return a - Python class referencing to it. - - Parameters: - - - namespace: the full name of the namespace containing the - class (e.g., ``System.Collections``). - - - classname: the name of the class in the specified namespace - (e.g. ``ArrayList``). 
""" - fullname = '%s.%s' % (namespace, classname) - w_cls = CliClassCache.get(fullname) - if w_cls is None: - w_cls = build_cli_class(space, namespace, classname, fullname, assemblyname) - CliClassCache.put(fullname, w_cls) - return w_cls - -def build_cli_class(space, namespace, classname, fullname, assemblyname): - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - b_type = System.Type.GetType(assembly_qualified_name) - if b_type is None: - raise operationerrfmt(space.w_ImportError, - "Cannot load .NET type: %s", fullname) - - # this is where we locate the interfaces inherited by the class - # set the flag hasIEnumerable if IEnumerable interface has been by the class - hasIEnumerable = b_type.GetInterface("System.Collections.IEnumerable") is not None - - # this is where we test if the class is Generic - # set the flag isClassGeneric - isClassGeneric = False - if b_type.get_IsGenericType(): - isClassGeneric = True - - w_staticmethods, w_methods = get_methods(space, b_type) - w_properties, w_indexers = get_properties(space, b_type) - return build_wrapper(space, - space.wrap(namespace), - space.wrap(classname), - space.wrap(assemblyname), - w_staticmethods, - w_methods, - w_properties, - w_indexers, - space.wrap(hasIEnumerable), - space.wrap(isClassGeneric)) - - -class W_CliObject(W_Root): - def __init__(self, space, b_obj): - self.space = space - self.b_obj = b_obj - - @unwrap_spec(name=str, startfrom=int) - def call_method(self, name, w_args, startfrom=0): - return call_method(self.space, self.b_obj, self.b_obj.GetType(), name, w_args, startfrom) - - at unwrap_spec(typename=str) -def cli_object_new(space, w_subtype, typename, w_args): - b_type = System.Type.GetType(typename) - b_args, b_paramtypes = rewrap_args(space, w_args, 0) - b_ctor = get_constructor(space, b_type, b_paramtypes) - try: - b_obj = b_ctor.Invoke(b_args) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - return space.wrap(W_CliObject(space, b_obj)) - -W_CliObject.typedef = TypeDef( - '_CliObject_internal', - __new__ = interp2app(cli_object_new), - call_method = interp2app(W_CliObject.call_method), - ) - -path, _ = os.path.split(__file__) -app_clr = os.path.join(path, 'app_clr.py') -app = ApplevelClass(file(app_clr).read()) -del path, app_clr -build_wrapper = app.interphook("build_wrapper") -wrapper_from_cliobj = app.interphook("wrapper_from_cliobj") diff --git a/pypy/module/clr/test/__init__.py b/pypy/module/clr/test/__init__.py deleted file mode 100644 --- a/pypy/module/clr/test/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# diff --git a/pypy/module/clr/test/test_clr.py b/pypy/module/clr/test/test_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_clr.py +++ /dev/null @@ -1,292 +0,0 @@ -from pypy.module.clr.assemblyname import mscorlib - -def skip_if_not_pythonnet(): - import py - try: - import clr - except ImportError: - py.test.skip('Must use pythonnet to access .NET libraries') - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def setup_class(cls): - cls.w_mscorlib = cls.space.wrap(mscorlib) - - def test_cliobject(self): - import clr - obj = clr._CliObject_internal('System.Collections.ArrayList', []) - max_index = obj.call_method('Add', [42]) - assert max_index == 0 - - def test_cache(self): - import clr - ArrayList = 
clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - ArrayList2 = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is ArrayList2 - - def test_load_fail(self): - import clr - raises(ImportError, clr.load_cli_class, self.mscorlib, 'Foo', 'Bar') - - def test_ArrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - obj.Add(43) - total = obj.get_Item(0) + obj.get_Item(1) - assert total == 42+43 - - def test_ArrayList_error(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - raises(StandardError, obj.get_Item, 0) - - def test_float_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42.0) - item = obj.get_Item(0) - assert isinstance(item, float) - - def test_bool_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(True) - obj.Add(False) - t = obj.get_Item(0) - f = obj.get_Item(1) - assert t and isinstance(t, bool) - assert not f and isinstance(f, bool) - obj.Add(42) - assert obj.Contains(42) - - def test_getitem(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj[0] == 42 - - def test_property(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj.Count == 1 - obj.Capacity = 10 - assert obj.Capacity == 10 - - def test_unboundmethod(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - ArrayList.Add(obj, 42) - assert obj.get_Item(0) == 42 - - def test_unboundmethod_typeerror(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - raises(TypeError, ArrayList.Add) - raises(TypeError, ArrayList.Add, 0) - - def test_overload(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - for i in range(10): - obj.Add(i) - assert obj.IndexOf(7) == 7 - assert obj.IndexOf(7, 0, 5) == -1 - - def test_wrong_overload(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - raises(TypeError, Math.Abs, "foo") - - def test_wrong_overload_ctor(self): - from System.Collections import ArrayList - raises(TypeError, ArrayList, "foo") - - def test_staticmethod(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - res = Math.Abs(-42) - assert res == 42 - assert type(res) is int - res = Math.Abs(-42.0) - assert res == 42.0 - assert type(res) is float - - def test_constructor_args(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList(42) - assert obj.Capacity == 42 - - def test_None_as_null(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - Hashtable = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Hashtable') - x = ArrayList() - x.Add(None) - assert x[0] is None - y = Hashtable() - assert y["foo"] is None - - def test_pass_opaque_arguments(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - class Foo: - pass - obj = 
Foo() - x = ArrayList() - x.Add(obj) - obj2 = x[0] - assert obj is obj2 - - def test_string_wrapping(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add("bar") - s = x[0] - assert s == "bar" - - def test_static_property(self): - import clr - import os - Environment = clr.load_cli_class(self.mscorlib, 'System', 'Environment') - assert Environment.CurrentDirectory == os.getcwd() - Environment.CurrentDirectory == '/' - assert Environment.CurrentDirectory == os.getcwd() - - def test_GetEnumerator(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - enum = x.GetEnumerator() - assert enum.MoveNext() is False - - def test_iteration_arrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add(1) - x.Add(2) - x.Add(3) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 1+2+3+4 - - def test_iteration_stack(self): - import clr - Stack = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Stack') - obj = Stack() - obj.Push(1) - obj.Push(54) - obj.Push(21) - sum = 0 - for i in obj: - sum += i - assert sum == 1+54+21 - - def test_load_generic_class(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - x.Add(42) - x.Add(4) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 42+4+4 - - def test_generic_class_typeerror(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - raises(TypeError, x.Add, "test") - - def test_generic_dict(self): - import clr - genDictIntStr = clr.load_cli_class(self.mscorlib, - "System.Collections.Generic", - "Dictionary`2[System.Int32,System.String]") - x = genDictIntStr() - x[1] = "test" - x[2] = "rest" - assert x[1] == "test" - assert x[2] == "rest" - raises(TypeError, x.__setitem__, 3, 3) - raises(TypeError, x.__setitem__, 4, 4.453) - raises(TypeError, x.__setitem__, "test", 3) - - def test_generic_metaclass_list(self): - import clr - from System.Collections.Generic import List - import System.Int32 - lst = List[System.Int32]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - lst = List[int]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - def test_generic_metaclass_dict(self): - import clr - from System.Collections.Generic import Dictionary - import System.Int32 - import System.String - d1 = Dictionary[System.Int32, System.String]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - d1 = Dictionary[int, str]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - def test_generic_metaclass_object(self): - import clr - from System.Collections.Generic import List - class Foo(object): - pass - lst = List[Foo]() - f = Foo() - lst.Add(f) - assert lst[0] is f - - def test_generic_metaclass_typeerror(self): - import clr - from System.Collections.Generic import List - raises(TypeError, "List[int, int]") - - def test_py2cli_cliobjects(self): - from System.IO import StreamReader, MemoryStream - mem = MemoryStream(100) - sr = StreamReader(mem) # does not raise - - def test_external_assemblies(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - from System.IO import StringReader - from System.Xml import XmlReader - buffer = 
StringReader("test") - xml = XmlReader.Create(buffer) - xml.ReadStartElement("foo") - assert xml.ReadString() == 'test' - xml.ReadEndElement() diff --git a/pypy/module/clr/test/test_importer.py b/pypy/module/clr/test/test_importer.py deleted file mode 100644 --- a/pypy/module/clr/test/test_importer.py +++ /dev/null @@ -1,76 +0,0 @@ -from pypy.module.clr.test.test_clr import skip_if_not_pythonnet - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def test_list_of_namespaces_and_classes(self): - import clr - ns, classes, generics = clr.get_assemblies_info() - - assert 'System' in ns - assert 'System.Collections' in ns - assert 'System.Runtime' in ns - assert 'System.Runtime.InteropServices' in ns - - assert 'System' not in classes - assert 'System.Math' in classes - assert 'System.Collections.ArrayList' in classes - - assert 'System.Collections.Generic.List' in classes - assert generics['System.Collections.Generic.List'] == 'System.Collections.Generic.List`1' - - def test_import_hook_simple(self): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - import clr - import System.Math - - assert System.Math.Abs(-5) == 5 - assert System.Math.Pow(2, 5) == 2**5 - - Math = clr.load_cli_class(mscorlib, 'System', 'Math') - assert Math is System.Math - - import System - a = System.Collections.Stack() - a.Push(3) - a.Push(44) - sum = 0 - for i in a: - sum += i - assert sum == 3+44 - - import System.Collections.ArrayList - ArrayList = clr.load_cli_class(mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is System.Collections.ArrayList - - def test_ImportError(self): - def fn(): - import non_existent_module - raises(ImportError, fn) - - def test_import_twice(self): - import System - s1 = System - import System - assert s1 is System - - def test_lazy_import(self): - import System - System.Runtime.InteropServices # does not raise attribute error - - def test_generic_class_import(self): - import System.Collections.Generic.List - - def test_import_from(self): - from System.Collections import ArrayList - - def test_AddReferenceByPartialName(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - import System.Xml.XmlReader # does not raise - - def test_AddReference_early(self): - import clr - clr.AddReferenceByPartialName('System.Xml') diff --git a/pypy/module/clr/test/test_interp_clr.py b/pypy/module/clr/test/test_interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_interp_clr.py +++ /dev/null @@ -1,10 +0,0 @@ -from pypy.module.clr.interp_clr import split_fullname - -def test_split_fullname(): - split = split_fullname - assert split('Foo') == ('', 'Foo') - assert split('System.Foo') == ('System', 'Foo') - assert split('System.Foo.Bar') == ('System.Foo', 'Bar') - assert split('System.Foo.A+B') == ('System.Foo', 'A+B') - assert split('System.') == ('System', '') - From noreply at buildbot.pypy.org Thu Jul 25 19:56:22 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 19:56:22 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Remove ootype support from pypy Message-ID: <20130725175622.B9AE11C101E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65657:7ab1bd7d7871 Date: 2013-07-25 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/7ab1bd7d7871/ Log: Remove ootype support from pypy diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -340,10 +340,6 @@ 
if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +347,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -234,9 +234,6 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - ## if config.translation.type_system == 'ootype': - ## config.objspace.usemodules.suggest(rbench=True) - config.translation.suggest(check_str_without_nul=True) if config.translation.thread: @@ -271,12 +268,6 @@ elif config.objspace.usemodules.pypyjit: config.translation.jit = True - if config.translation.backend == "cli": - config.objspace.usemodules.clr = True - # XXX did it ever work? - #elif config.objspace.usemodules.clr: - # config.translation.backend == "cli" - if config.translation.sandbox: config.objspace.lonepycfiles = False config.objspace.usepycfiles = False @@ -292,16 +283,6 @@ wrapstr = 'space.wrap(%r)' % (options) pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr - if config.translation.backend in ["cli", "jvm"] and sys.platform == "win32": - # HACK: The ftruncate implementation in streamio.py which is used for the Win32 platform - # is specific for the C backend and can't be generated on CLI or JVM. Because of that, - # we have to patch it out. - from rpython.rlib import streamio - def ftruncate_win32_dummy(fd, size): pass - def _setfd_binary_dummy(fd): pass - streamio.ftruncate_win32 = ftruncate_win32_dummy - streamio._setfd_binary = _setfd_binary_dummy - return self.get_entry_point(config) def jitpolicy(self, driver): diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -18,10 +18,10 @@ The builtin Unicode codecs use the following interface: - _encode(Unicode_object[,errors='strict']) -> + _encode(Unicode_object[,errors='strict']) -> (string object, bytes consumed) - _decode(char_buffer_obj[,errors='strict']) -> + _decode(char_buffer_obj[,errors='strict']) -> (Unicode object, bytes consumed) _encode() interfaces also accept non-Unicode object as @@ -90,8 +90,7 @@ "NOT_RPYTHON" # mbcs codec is Windows specific, and based on rffi. 
- if (hasattr(runicode, 'str_decode_mbcs') and - space.config.translation.type_system != 'ootype'): + if (hasattr(runicode, 'str_decode_mbcs')): self.interpleveldefs['mbcs_encode'] = 'interp_codecs.mbcs_encode' self.interpleveldefs['mbcs_decode'] = 'interp_codecs.mbcs_decode' diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -12,18 +12,6 @@ "set_file_encoding": "interp_file.set_file_encoding", } - def __init__(self, space, *args): - "NOT_RPYTHON" - - # on windows with oo backends, remove file.truncate, - # because the implementation is based on rffi - if (sys.platform == 'win32' and - space.config.translation.type_system == 'ootype'): - from pypy.module._file.interp_file import W_File - del W_File.typedef.rawdict['truncate'] - - MixedModule.__init__(self, space, *args) - def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. from pypy.module._file.interp_file import getopenstreams, StreamErrors diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -4,25 +4,6 @@ import os exec 'import %s as posix' % os.name -# this is the list of function which is *not* present in the posix module of -# IronPython 2.6, and that we want to ignore for now -lltype_only_defs = [ - 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', - 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', - 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', - 'getgid', 'getgroups', 'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', - 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', - 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', - 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', - 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', - 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', - 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] - -# the Win32 urandom implementation isn't going to translate on JVM or CLI so -# we have to remove it -lltype_only_defs.append('urandom') - class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -54,7 +35,7 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { 'open' : 'interp_posix.open', 'lseek' : 'interp_posix.lseek', @@ -168,8 +149,8 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', 'setregid', 'getsid', 'setsid']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) @@ -178,24 +159,15 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name - def __init__(self, space, w_name): - # if it's an ootype translation, remove all the defs that are lltype - # only - backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': - for name in lltype_only_defs: - self.interpleveldefs.pop(name, None) - MixedModule.__init__(self, space, w_name) - def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -215,10 +215,7 @@ enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) def build_stat_result(space, st): - if space.config.translation.type_system == 'ootype': - FIELDS = PORTABLE_STAT_FIELDS - else: - FIELDS = STAT_FIELDS # also when not translating at all + FIELDS = STAT_FIELDS # also when not translating at all lst = [None] * ll_os_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() stat_float_times = space.fromcache(StatState).stat_float_times @@ -480,11 +477,7 @@ def getstatfields(space): # for app_posix.py: export the list of 'st_xxx' names that we know # about at RPython level - if space.config.translation.type_system == 'ootype': - FIELDS = PORTABLE_STAT_FIELDS - else: - FIELDS = STAT_FIELDS # also when not translating at all - return space.newlist([space.wrap(name) for _, (name, _) in FIELDS]) + return space.newlist([space.wrap(name) for _, (name, _) in STAT_FIELDS]) class State: diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -3,12 +3,12 @@ from rpython.rlib.objectmodel import we_are_translated def getdefaultencoding(space): - """Return the current default string encoding used by the Unicode + """Return the current default string encoding used by the Unicode implementation.""" return space.wrap(space.sys.defaultencoding) def setdefaultencoding(space, w_encoding): - """Set the current default string encoding used by the Unicode + """Set the current default string encoding used by the Unicode implementation.""" encoding = space.str_w(w_encoding) mod = space.getbuiltinmodule("_codecs") @@ -37,10 +37,6 @@ base_encoding = None def _getfilesystemencoding(space): - if (space.config.translation.type_system == 'ootype'): - # XXX: fix this for ootype - return base_encoding - # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: try: diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -493,9 +493,6 @@ def unicode_startswith__Unicode_Unicode_ANY_ANY(space, w_self, w_substr, w_start, w_end): self, start, end = _convert_idx_params(space, w_self, w_start, w_end, True) - # XXX this stuff can be waaay better for ootypebased backends if - # we re-use more of our rpython machinery (ie implement startswith - # with additional parameters as rpython) return space.newbool(startswith(self, w_substr._value, start, end)) def unicode_startswith__Unicode_ANY_ANY_ANY(space, w_unistr, w_prefixes, diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -1,7 +1,7 @@ """ A file that invokes translation of PyPy with the JIT enabled. 
-Run it with py.test -s --pdb pypyjit.py [--ootype] +Run it with py.test -s --pdb pypyjit.py """ @@ -14,20 +14,9 @@ from rpython.rtyper.annlowlevel import llhelper, llstr, oostr, hlstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype from pypy.interpreter.pycode import PyCode from rpython.translator.goal import unixcheckpoint -if not hasattr(py.test.config.option, 'ootype'): - import sys - print >> sys.stderr, __doc__ - sys.exit(2) - -if py.test.config.option.ootype: - BACKEND = 'cli' -else: - BACKEND = 'c' - config = get_pypy_config(translating=True) config.translation.backendopt.inline_threshold = 0.1 config.translation.gc = 'boehm' @@ -45,15 +34,8 @@ # set_pypy_opt_level(config, level='jit') -if BACKEND == 'c': - config.objspace.std.multimethods = 'mrd' - multimethod.Installer = multimethod.InstallerVersion2 -elif BACKEND == 'cli': - config.objspace.std.multimethods = 'doubledispatch' - multimethod.Installer = multimethod.InstallerVersion1 - config.translation.backend = 'cli' -else: - assert False +config.objspace.std.multimethods = 'mrd' +multimethod.Installer = multimethod.InstallerVersion2 print config import sys, pdb @@ -82,13 +64,8 @@ code = ec.compiler.compile(source, filename, 'exec', 0) return llstr(space.str_w(dumps(space, code, space.wrap(2)))) -if BACKEND == 'c': - FPTR = lltype.Ptr(lltype.FuncType([], lltype.Ptr(STR))) - read_code_ptr = llhelper(FPTR, read_code) -else: - llstr = oostr - FUNC = ootype.StaticMethod([], ootype.String) - read_code_ptr = llhelper(FUNC, read_code) +FPTR = lltype.Ptr(lltype.FuncType([], lltype.Ptr(STR))) +read_code_ptr = llhelper(FPTR, read_code) def entry_point(): from pypy.module.marshal.interp_marshal import loads @@ -117,13 +94,8 @@ from rpython.jit.codewriter.codewriter import CodeWriter CodeWriter.debug = True - from rpython.jit.tl.pypyjit_child import run_child, run_child_ootype - if BACKEND == 'c': - run_child(globals(), locals()) - elif BACKEND == 'cli': - run_child_ootype(globals(), locals()) - else: - assert False + from rpython.jit.tl.pypyjit_child import run_child + run_child(globals(), locals()) if __name__ == '__main__': diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -19,14 +19,6 @@ apply_jit(interp, graph, LLtypeCPU) -def run_child_ootype(glob, loc): - import sys, pdb - interp = loc['interp'] - graph = loc['graph'] - from rpython.jit.backend.llgraph.runner import OOtypeCPU - apply_jit(interp, graph, OOtypeCPU) - - def apply_jit(interp, graph, CPUClass): print 'warmspot.jittify_and_run() started...' policy = PyPyJitPolicy() From noreply at buildbot.pypy.org Thu Jul 25 20:00:12 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 20:00:12 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130725180012.5E3B61C101E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65658:9d73701fdecb Date: 2013-07-25 19:58 +0200 http://bitbucket.org/pypy/pypy/changeset/9d73701fdecb/ Log: Fix. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -47,7 +47,7 @@ _builder = StringBuilder def _newlist_unwrapped(self, space, res): - return space.wrap([W_BytearrayObject(list(i)) for i in res]) + return space.newlist([W_BytearrayObject(list(i)) for i in res]) def _isupper(self, ch): return ch.isupper() diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -490,8 +490,8 @@ if isinstance(self, W_BytearrayObject): w_sub = self._new(sub) return space.newtuple( - [self._sliced(space, value, 0, pos, value), w_sub, - self._sliced(space, value, pos+len(sub), len(value), value)]) + [self._sliced(space, value, 0, pos, self), w_sub, + self._sliced(space, value, pos+len(sub), len(value), self)]) @specialize.argtype(0) def descr_rpartition(self, space, w_sub): @@ -508,8 +508,8 @@ if isinstance(self, W_BytearrayObject): w_sub = self._new(sub) return space.newtuple( - [self._sliced(space, value, 0, pos, value), w_sub, - self._sliced(space, value, pos+len(sub), len(value), value)]) + [self._sliced(space, value, 0, pos, self), w_sub, + self._sliced(space, value, pos+len(sub), len(value), self)]) @unwrap_spec(count=int) @specialize.argtype(0) From noreply at buildbot.pypy.org Thu Jul 25 20:01:36 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 25 Jul 2013 20:01:36 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: removed some more ootype stuff around stat and configuration Message-ID: <20130725180136.404AC1C101E@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: kill-ootype Changeset: r65659:35ef76608d04 Date: 2013-07-25 11:00 -0700 http://bitbucket.org/pypy/pypy/changeset/35ef76608d04/ Log: removed some more ootype stuff around stat and configuration diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -208,11 +208,8 @@ # ____________________________________________________________ -# For LL backends, expose all fields. -# For OO backends, only the portable fields (the first 10). 
STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) -PORTABLE_STAT_FIELDS = unrolling_iterable( - enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) + def build_stat_result(space, st): FIELDS = STAT_FIELDS # also when not translating at all diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -19,10 +19,7 @@ def __init__(self, rtyper): self.rtyper = rtyper - if rtyper.type_system.name == "lltypesystem": - self.stat_fields = ll_os_stat.STAT_FIELDS - else: - self.stat_fields = ll_os_stat.PORTABLE_STAT_FIELDS + self.stat_fields = ll_os_stat.STAT_FIELDS self.stat_field_indexes = {} for i, (name, TYPE) in enumerate(self.stat_fields): From noreply at buildbot.pypy.org Thu Jul 25 20:11:29 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 25 Jul 2013 20:11:29 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Use unicode_from_encoded_object() instead of decode_object(). decode_object() doesn't always return unicode. Message-ID: <20130725181129.D8C451C0149@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65660:b23dd3b042f3 Date: 2013-07-25 20:08 +0200 http://bitbucket.org/pypy/pypy/changeset/b23dd3b042f3/ Log: Use unicode_from_encoded_object() instead of decode_object(). decode_object() doesn't always return unicode. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -10,7 +10,7 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.unicodeobject import (unicode_from_string, - decode_object, _get_encoding_and_errors) + decode_object, unicode_from_encoded_object, _get_encoding_and_errors) from rpython.rlib.jit import we_are_jitted from rpython.rlib.objectmodel import compute_hash, compute_unique_id from rpython.rlib.rstring import StringBuilder @@ -177,7 +177,7 @@ def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): - self_as_unicode = decode_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, None) return space.add(self_as_unicode, w_other) elif space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy @@ -188,13 +188,13 @@ def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): - self_as_unicode = decode_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, None) return self_as_unicode._startswith(space, value, w_prefix, start, end) return StringMethods._startswith(self, space, value, w_prefix, start, end) def _endswith(self, space, value, w_suffix, start, end): if space.isinstance_w(w_suffix, space.w_unicode): - self_as_unicode = decode_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, None) return self_as_unicode._endswith(space, value, w_suffix, start, end) return StringMethods._endswith(self, space, value, w_suffix, start, end) From noreply at buildbot.pypy.org Thu Jul 25 20:26:59 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 20:26:59 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: fix a somewhat silly test Message-ID: <20130725182659.548521C0149@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65661:6d258e2626a5 
Date: 2013-07-25 19:25 +0100 http://bitbucket.org/pypy/pypy/changeset/6d258e2626a5/ Log: fix a somewhat silly test diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -101,20 +101,24 @@ Variable()) assert cc.guess_call_kind(op) == 'recursive' - op = SpaceOperation('direct_call', [Constant(object())], + class fakeresidual: + _obj = object() + op = SpaceOperation('direct_call', [Constant(fakeresidual)], Variable()) assert cc.guess_call_kind(op) == 'residual' class funcptr: - class graph: - class func: - oopspec = "spec" + class _obj: + class graph: + class func: + oopspec = "spec" op = SpaceOperation('direct_call', [Constant(funcptr)], Variable()) assert cc.guess_call_kind(op) == 'builtin' class funcptr: - graph = g + class _obj: + graph = g op = SpaceOperation('direct_call', [Constant(funcptr)], Variable()) res = cc.graphs_from(op) @@ -122,7 +126,8 @@ assert cc.guess_call_kind(op) == 'regular' class funcptr: - graph = object() + class _obj: + graph = object() op = SpaceOperation('direct_call', [Constant(funcptr)], Variable()) res = cc.graphs_from(op) From noreply at buildbot.pypy.org Thu Jul 25 22:31:26 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 25 Jul 2013 22:31:26 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: remove the last (?) oosend references Message-ID: <20130725203126.A77471C142B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65662:d9308fb479f6 Date: 2013-07-25 21:30 +0100 http://bitbucket.org/pypy/pypy/changeset/d9308fb479f6/ Log: remove the last (?) oosend references diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -65,7 +65,7 @@ while todo: top_graph = todo.pop() for _, op in top_graph.iterblockops(): - if op.opname not in ("direct_call", "indirect_call", "oosend"): + if op.opname not in ("direct_call", "indirect_call"): continue kind = self.guess_call_kind(op, is_candidate) # use callers() to view the calling chain in pdb @@ -90,13 +90,8 @@ if is_candidate(graph): return [graph] # common case: look inside this graph else: - assert op.opname in ('indirect_call', 'oosend') - if op.opname == 'indirect_call': - graphs = op.args[-1].value - else: - v_obj = op.args[1].concretetype - graphs = v_obj._lookup_graphs(op.args[0].value) - # + assert op.opname == 'indirect_call' + graphs = op.args[-1].value if graphs is None: # special case: handle the indirect call that goes to # the 'instantiate' methods. 
This check is a bit imprecise @@ -141,10 +136,6 @@ return 'residual' if hasattr(targetgraph.func, 'oopspec'): return 'builtin' - elif op.opname == 'oosend': - SELFTYPE, methname, opargs = support.decompose_oosend(op) - if SELFTYPE.oopspec_name is not None: - return 'builtin' if self.graphs_from(op, is_candidate) is None: return 'residual' return 'regular' diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -814,15 +814,3 @@ rtyper._builtin_func_for_spec_cache[key] = (c_func, LIST_OR_DICT) # return c_func, LIST_OR_DICT - - -def decompose_oosend(op): - name = op.args[0].value - opargs = op.args[1:] - SELFTYPE = opargs[0].concretetype - return SELFTYPE, name, opargs - -def lookup_oosend_method(op): - SELFTYPE, methname, args_v = decompose_oosend(op) - _, meth = SELFTYPE._lookup(methname) - return SELFTYPE, methname, meth diff --git a/rpython/jit/metainterp/jitprof.py b/rpython/jit/metainterp/jitprof.py --- a/rpython/jit/metainterp/jitprof.py +++ b/rpython/jit/metainterp/jitprof.py @@ -112,7 +112,7 @@ def count_ops(self, opnum, kind=Counters.OPS): from rpython.jit.metainterp.resoperation import rop self.counters[kind] += 1 - if opnum == rop.CALL and kind == Counters.RECORDED_OPS:# or opnum == rop.OOSEND: + if opnum == rop.CALL and kind == Counters.RECORDED_OPS: self.calls += 1 def print_stats(self): diff --git a/rpython/translator/backendopt/graphanalyze.py b/rpython/translator/backendopt/graphanalyze.py --- a/rpython/translator/backendopt/graphanalyze.py +++ b/rpython/translator/backendopt/graphanalyze.py @@ -156,10 +156,6 @@ break return self.finalize_builder(result) - def analyze_oosend(self, TYPE, name, seen=None): - graphs = TYPE._lookup_graphs(name) - return self.analyze_indirect_call(graphs, seen) - def analyze_all(self, graphs=None): if graphs is None: graphs = self.translator.graphs From noreply at buildbot.pypy.org Thu Jul 25 22:45:44 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:44 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: failing test Message-ID: <20130725204544.45A991C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65663:5dff0c40663d Date: 2013-07-23 01:02 +0200 http://bitbucket.org/pypy/pypy/changeset/5dff0c40663d/ Log: failing test diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script = getscript(""" @@ -706,6 +706,15 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +980,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # 
we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. plat-linux2) assert len(newpath) >= 2 for p in newpath: From noreply at buildbot.pypy.org Thu Jul 25 22:45:45 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:45 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: implement __pypy__.os.real_getenv with a test Message-ID: <20130725204545.7B8651C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65664:8f98f2b808f9 Date: 2013-07-23 22:52 +0200 http://bitbucket.org/pypy/pypy/changeset/8f98f2b808f9/ Log: implement __pypy__.os.real_getenv with a test diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +from rpython.rtyper.module.ll_os_environ import getenv_llimpl + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name=str) +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(getenv_llimpl(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None From noreply at buildbot.pypy.org Thu Jul 25 22:45:46 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:46 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: attempt to fix app_main Message-ID: <20130725204546.C52371C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65665:0806ff7d7880 Date: 2013-07-23 23:00 +0200 http://bitbucket.org/pypy/pypy/changeset/0806ff7d7880/ Log: attempt to fix app_main diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -460,8 +460,11 @@ if os.getenv('PYTHONVERBOSE'): options["verbose"] = 1 + # skip environment cache since PYTHONINSPECT could be set in same process + from __pypy__.os import real_getenv + if (options["interactive"] or - (not options["ignore_environment"] and os.getenv('PYTHONINSPECT'))): + (not options["ignore_environment"] and real_getenv('PYTHONINSPECT'))): options["inspect"] = 1 ## We don't print the warning, because it offers no additional security From noreply at buildbot.pypy.org Thu Jul 25 22:45:48 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:48 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: 
try using RPython os.getenv instead of LL version Message-ID: <20130725204548.037C81C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65666:9a20d5ede0ec Date: 2013-07-24 08:45 +0200 http://bitbucket.org/pypy/pypy/changeset/9a20d5ede0ec/ Log: try using RPython os.getenv instead of LL version diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py --- a/pypy/module/__pypy__/interp_os.py +++ b/pypy/module/__pypy__/interp_os.py @@ -1,4 +1,4 @@ -from rpython.rtyper.module.ll_os_environ import getenv_llimpl +import os from pypy.interpreter.gateway import unwrap_spec @@ -6,4 +6,4 @@ @unwrap_spec(name=str) def real_getenv(space, name): """Get an OS environment value skipping Python cache""" - return space.wrap(getenv_llimpl(name)) + return space.wrap(os.getenv(name)) From noreply at buildbot.pypy.org Thu Jul 25 22:45:49 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:49 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: no None allowed Message-ID: <20130725204549.A35891C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65667:0f1dbffcb578 Date: 2013-07-24 21:46 +0200 http://bitbucket.org/pypy/pypy/changeset/0f1dbffcb578/ Log: no None allowed diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py --- a/pypy/module/__pypy__/interp_os.py +++ b/pypy/module/__pypy__/interp_os.py @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import unwrap_spec - at unwrap_spec(name=str) + at unwrap_spec(name='str0') def real_getenv(space, name): """Get an OS environment value skipping Python cache""" - return space.wrap(os.getenv(name)) + return space.wrap(os.environ.get(name)) From noreply at buildbot.pypy.org Thu Jul 25 22:45:50 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:50 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: use __pypy__.os Message-ID: <20130725204550.E9FA11C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65668:c1575d6e1de5 Date: 2013-07-25 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c1575d6e1de5/ Log: use __pypy__.os diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -460,11 +460,8 @@ if os.getenv('PYTHONVERBOSE'): options["verbose"] = 1 - # skip environment cache since PYTHONINSPECT could be set in same process - from __pypy__.os import real_getenv - if (options["interactive"] or - (not options["ignore_environment"] and real_getenv('PYTHONINSPECT'))): + (not options["ignore_environment"] and os.getenv('PYTHONINSPECT'))): options["inspect"] = 1 ## We don't print the warning, because it offers no additional security @@ -559,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. 
# + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True From noreply at buildbot.pypy.org Thu Jul 25 22:45:52 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:52 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: skip on CPython, whatsnew Message-ID: <20130725204552.4E5CF1C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65669:a4689d27a2ef Date: 2013-07-25 21:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a4689d27a2ef/ Log: skip on CPython, whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,9 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -707,6 +707,11 @@ assert '42\n' in data def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with get_realenv") + # should be noninteractive when piped in data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' self.run('', senddata=data, expect_prompt=False) From noreply at buildbot.pypy.org Thu Jul 25 22:45:54 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:54 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: merge default Message-ID: <20130725204554.1DF5B1C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65670:3b8868e9d503 Date: 2013-07-25 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/3b8868e9d503/ Log: merge default diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,62 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. 
+This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +Highlights +========== + +* Support for os.statvfs and os.fstatvfs on unix systems. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,8 +32,16 @@ .. branch: ssl_moving_write_buffer +<<<<<<< local .. branch: pythoninspect-fix Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process to start interactive prompt when the script execution finishes. This adds new __pypy__.os.real_getenv call that bypasses Python cache and looksup env in the underlying OS. Translatorshell now works on PyPy. +======= +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. 
+>>>>>>> other diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,21 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'statvfs_result': 'app_posix.statvfs_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' : 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -54,44 +55,46 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 
'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -168,9 +171,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -178,7 +181,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name @@ -187,7 +190,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -195,7 +198,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,16 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,6 +125,9 @@ self.llbox = llbox def descr_getint(self, space): + if not jit_hooks.box_isint(self.llbox): + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) @@ -182,7 +185,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +240,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +351,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num + assert int_add.offset == 0 
self.on_compile_bridge() expected = ('>' % repr(self.f.func_code)) @@ -160,6 +164,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -33,10 +33,8 @@ def test_resource(): - try: - import lib_pypy.resource - except ImportError: - py.test.skip('no syslog on this platform') + if sys.platform == 'win32': + py.test.skip('no resource module on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/module/test_lib_pypy/test_md5_extra.py b/pypy/module/test_lib_pypy/test_md5_extra.py --- a/pypy/module/test_lib_pypy/test_md5_extra.py +++ b/pypy/module/test_lib_pypy/test_md5_extra.py @@ -1,227 +1,226 @@ """A test script to compare MD5 implementations. -A note about performance: the pure Python MD5 takes roughly -160 sec. per MB of data on a 233 MHz Intel Pentium CPU. +A note about performance: the pure Python MD5 takes roughly 160 sec. per +MB of data on a 233 MHz Intel Pentium CPU. """ +import md5 -from __future__ import absolute_import -import md5 # CPython's implementation in C. -from lib_pypy import _md5 as pymd5 +from pypy.module.test_lib_pypy.support import import_lib_pypy -# Helpers... +def compare_host(message, d2, d2h): + """Compare results against the host Python's builtin md5. -def formatHex(str): - "Print a string's HEX code in groups of two digits." - - d = map(None, str) - d = map(ord, d) - d = map(lambda x:"%02x" % x, d) - return ' '.join(d) - - -def format(str): - "Print a string as-is in groups of two characters." - - s = '' - for i in range(0, len(str)-1, 2): - s = s + "%03s" % str[i:i+2] - return s[1:] - - -def printDiff(message, d1, d2, expectedResult=None): - "Print different outputs for same message." - - print "Message: '%s'" % message - print "Message length: %d" % len(message) - if expectedResult: - print "%-48s (expected)" % format(expectedResult) - print "%-48s (Std. lib. MD5)" % formatHex(d1) - print "%-48s (Pure Python MD5)" % formatHex(d2) - print - - -# The real comparison function. - -def compareImp(message): - """Compare two MD5 implementations, C vs. pure Python module. - - For equal digests this returns None, otherwise it returns - a tuple of both digests. + For equal digests this returns None, otherwise it returns a tuple of + both digests. """ - - # Use Python's standard library MD5 compiled C module. + # Use the host Python's standard library MD5 compiled C module. m1 = md5.md5() m1.update(message) d1 = m1.digest() d1h = m1.hexdigest() - - # Use MD5 module in pure Python. - m2 = pymd5.new() - m2.update(message) - d2 = m2.digest() - d2h = m2.hexdigest() + # Return None if equal or the different digests if not equal. + return None if d1 == d2 and d1h == d2h else (d1, d2) - # Return None if equal or the different digests if not equal. 
- if d1 == d2 and d1h == d2h: - return - else: - return d1, d2 +class TestMD5Update: -class TestMD5Compare: - "Compare pure Python MD5 against Python's std. lib. version." - + spaceconfig = dict(usemodules=('struct',)) + + def test_update(self): + """Test updating cloned objects.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) + space = self.space + w__md5 = import_lib_pypy(space, '_md5') + + # Load both with same prefix. + prefix1 = 2**10 * 'a' + + # The host md5 + m1 = md5.md5() + m1.update(prefix1) + m1c = m1.copy() + + # The app-level _md5 + w_m2 = space.call_method(w__md5, 'new') + space.call_method(w_m2, 'update', space.wrap(prefix1)) + w_m2c = space.call_method(w_m2, 'copy') + + # Update and compare... + for i in range(len(cases)): + message = cases[i][0] + + m1c.update(message) + d1 = m1c.hexdigest() + + space.call_method(w_m2c, 'update', space.wrap(message)) + w_d2 = space.call_method(w_m2c, 'hexdigest') + d2 = space.str_w(w_d2) + + assert d1 == d2 + + +class AppTestMD5Compare: + """Compare pure Python MD5 against Python's std. lib. version.""" + + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + from pypy.interpreter import gateway + space = cls.space + cls.w__md5 = import_lib_pypy(space, '_md5') + if cls.runappdirect: + # interp2app doesn't work in appdirect mode + cls.w_compare_host = staticmethod(compare_host) + else: + compare_host.unwrap_spec = [str, str, str] + cls.w_compare_host = space.wrap(gateway.interp2app(compare_host)) + + def w_compare(self, message): + # Generate results against the app-level pure Python MD5 and + # pass them off for comparison against the host Python's MD5 + m2 = self._md5.new() + m2.update(message) + return self.compare_host(message, m2.digest(), m2.hexdigest()) + + def w__format_hex(self, string): + """Print a string's HEX code in groups of two digits.""" + d = map(None, string) + d = map(ord, d) + d = map(lambda x: "%02x" % x, d) + return ' '.join(d) + + def w__format(self, string): + """Print a string as-is in groups of two characters.""" + s = '' + for i in range(0, len(string) - 1, 2): + s = s + "%03s" % string[i:i + 2] + return s[1:] + + def w_print_diff(self, message, d1, d2, expectedResult=None): + """Print different outputs for same message.""" + print("Message: '%s'" % message) + print("Message length: %d" % len(message)) + if expectedResult: + print("%-48s (expected)" % self._format(expectedResult)) + print("%-48s (Std. lib. MD5)" % self._format_hex(d1)) + print("%-48s (Pure Python MD5)" % self._format_hex(d2)) + print() + def test1(self): - "Test cases with known digest result." 
- + """Test cases with known digest result.""" cases = ( - ("", - "d41d8cd98f00b204e9800998ecf8427e"), - ("a", - "0cc175b9c0f1b6a831c399e269772661"), - ("abc", - "900150983cd24fb0d6963f7d28e17f72"), - ("message digest", - "f96b697d7cb7938d525a2f31aaf161d0"), - ("abcdefghijklmnopqrstuvwxyz", - "c3fcd3d76192e4007dfb496cca67e13b"), - ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "d174ab98d277d9f5a5611c2c9f419d9f"), - ("1234567890"*8, - "57edf4a22be3c955ac49da2e2107b67a"), - ) + ("", + "d41d8cd98f00b204e9800998ecf8427e"), + ("a", + "0cc175b9c0f1b6a831c399e269772661"), + ("abc", + "900150983cd24fb0d6963f7d28e17f72"), + ("message digest", + "f96b697d7cb7938d525a2f31aaf161d0"), + ("abcdefghijklmnopqrstuvwxyz", + "c3fcd3d76192e4007dfb496cca67e13b"), + ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "d174ab98d277d9f5a5611c2c9f419d9f"), + ("1234567890"*8, + "57edf4a22be3c955ac49da2e2107b67a"), + ) - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message, expectedResult = cases[i][0], None if len(cases[i]) == 2: expectedResult = cases[i][1] - printDiff(message, d1, d2, expectedResult) + self.print_diff(message, d1, d2, expectedResult) assert res is None + def test2(self): + """Test cases without known digest result.""" + cases = ( + "123", + "1234", + "12345", + "123456", + "1234567", + "12345678", + "123456789 123456789 123456789 ", + "123456789 123456789 ", + "123456789 123456789 1", + "123456789 123456789 12", + "123456789 123456789 123", + "123456789 123456789 1234", + "123456789 123456789 123456789 1", + "123456789 123456789 123456789 12", + "123456789 123456789 123456789 123", + "123456789 123456789 123456789 1234", + "123456789 123456789 123456789 12345", + "123456789 123456789 123456789 123456", + "123456789 123456789 123456789 1234567", + "123456789 123456789 123456789 12345678", + ) - def test2(self): - "Test cases without known digest result." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None + def test3(self): + """Test cases with long messages (can take a while).""" + cases = ( + (2**10*'a',), + (2**10*'abcd',), + #(2**20*'a',), # 1 MB, takes about 160 sec. on a 233 Mhz Pentium. + ) - def test3(self): - "Test cases with long messages (can take a while)." - - cases = ( - (2**10*'a',), - (2**10*'abcd',), -## (2**20*'a',), ## 1 MB, takes about 160 sec. on a 233 Mhz Pentium. 
- ) - - for i in xrange(len(cases)): - res = compareImp(cases[i][0]) + for i in range(len(cases)): + res = self.compare(cases[i][0]) if res is not None: d1, d2 = res message = cases[i][0] - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - def test4(self): - "Test cases with increasingly growing message lengths." - + """Test cases with increasingly growing message lengths.""" i = 0 - while i < 2**5: + while i < 2**5: message = i * 'a' - res = compareImp(message) + res = self.compare(message) if res is not None: d1, d2 = res - printDiff(message, d1, d2) + self.print_diff(message, d1, d2) assert res is None - i = i + 1 + i += 1 - - def test5(self): - "Test updating cloned objects." - - cases = ( - "123", - "1234", - "12345", - "123456", - "1234567", - "12345678", - "123456789 123456789 123456789 ", - "123456789 123456789 ", - "123456789 123456789 1", - "123456789 123456789 12", - "123456789 123456789 123", - "123456789 123456789 1234", - "123456789 123456789 123456789 1", - "123456789 123456789 123456789 12", - "123456789 123456789 123456789 123", - "123456789 123456789 123456789 1234", - "123456789 123456789 123456789 12345", - "123456789 123456789 123456789 123456", - "123456789 123456789 123456789 1234567", - "123456789 123456789 123456789 12345678", - ) - - # Load both with same prefix. - prefix1 = 2**10 * 'a' - - m1 = md5.md5() - m1.update(prefix1) - m1c = m1.copy() - - m2 = pymd5.new() - m2.update(prefix1) - m2c = m2.copy() - - # Update and compare... - for i in xrange(len(cases)): - message = cases[i][0] - - m1c.update(message) - d1 = m1c.hexdigest() - - m2c.update(message) - d2 = m2c.hexdigest() - - assert d1 == d2 - - -def test_attributes(): - assert pymd5.digest_size == 16 - assert pymd5.new().digest_size == 16 - assert pymd5.new().digestsize == 16 - assert pymd5.new().block_size == 64 + def test_attributes(self): + _md5 = self._md5 + assert _md5.digest_size == 16 + assert _md5.new().digest_size == 16 + assert _md5.new().digestsize == 16 + assert _md5.new().block_size == 64 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -735,6 +735,10 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] @@ -752,6 +756,9 @@ if rx86.fits_in_32bits(rst): self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx diff --git a/rpython/jit/tool/test/f.pypylog.bz2 b/rpython/jit/tool/test/f.pypylog.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a982e459b1daa33547576733ccc0b560f99a3f79 GIT binary patch [cut] diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,7 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color + split_one_loop, postprocess, main, get_gradient_color, guard_number def test_gradient_color(): @@ -30,6 +30,20 @@ loops = splitloops(data) assert len(loops) == 2 + def test_no_of_loops_hexguards(self): + data = [preparse(""" + # Loop 0 : loop with 39 ops + debug_merge_point('', 0) + guard_class(p4, 141310752, descr=) [p0, p1] + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), preparse(""" + # Loop 1 : loop with 46 ops + p21 = getfield_gc(p4, descr=) + """)] + loops = splitloops(data) + assert len(loops) == 2 + def test_split_one_loop(self): real_loops = [FinalBlock(preparse(""" p21 = getfield_gc(p4, descr=) @@ -50,12 +64,42 @@ assert loop.left.content == '' assert loop.right.content == 'extra' + def test_split_one_loop_hexguards(self): + real_loops = [FinalBlock(preparse(""" + p21 = getfield_gc(p4, descr=) + guard_class(p4, 141310752, descr=) [p0, p1] + """), None), FinalBlock(preparse(""" + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), None)] + real_loops[0].loop_no = 0 + real_loops[1].loop_no = 1 + allloops = real_loops[:] + split_one_loop(real_loops, 'Guard0x10abcdef0', 'extra', 1, guard_number(("0x10abcdef0", "0x")), allloops) + loop = real_loops[1] + assert isinstance(loop, Block) + assert loop.content.endswith('p1]') + loop.left = allloops[loop.left] + loop.right = allloops[loop.right] + assert loop.left.content == '' + assert loop.right.content == 'extra' + def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP', 0)", None)] postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") + def test_postparse_new(self): + real_loops = [FinalBlock("debug_merge_point(0, 0, ' #351 LOAD_FAST')", None)] + postprocess(real_loops, real_loops[:], {}) + assert real_loops[0].header.startswith("_optimize_charset. file '/usr/local/Cellar/pypy/2.0-beta2/lib-python/2.7/sre_compile.py'. 
line 207") + def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') main(str(fname), False, view=False) # assert did not explode + + def test_load_actual_f(self): + fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') + main(str(fname), False, view=False) + # assert did not explode diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -56,6 +56,18 @@ BOX_COLOR = (128, 0, 96) +GUARDNO_RE = "((0x)?[\da-f]+)" +def guard_number(guardno_match): + if (len(guardno_match) == 1 # ("12354",) + or guardno_match[1] != "0x" # ("12345", None) + ): + return int(guardno_match[0]) + else: # ("0x12ef", "0x") + return int(guardno_match[0], 16) + +def guard_number_string(guardno_match): + return guardno_match[0] # its always the first group + class BasicBlock(object): counter = 0 startlineno = 0 @@ -85,13 +97,15 @@ def set_content(self, content): self._content = content - groups = re.findall('Guard(\d+)', content) + groups = re.findall('Guard' + GUARDNO_RE, content) if not groups: self.first_guard = -1 self.last_guard = -1 else: - self.first_guard = int(groups[0]) - self.last_guard = int(groups[-1]) + # guards can be out of order nowadays + groups = sorted(groups) + self.first_guard = guard_number(groups[0]) + self.last_guard = guard_number(groups[-1]) content = property(get_content, set_content) @@ -197,11 +211,11 @@ _loop.loop_no = no allloops.append(_loop) else: - m = re.search("bridge out of Guard (\d+)", firstline) + m = re.search("bridge out of Guard " + GUARDNO_RE, firstline) assert m - guard_s = 'Guard' + m.group(1) + guard_s = 'Guard' + guard_number_string(m.groups()) split_one_loop(real_loops, guard_s, loop, counter, - int(m.group(1)), allloops) + guard_number(m.groups()), allloops) counter += loop.count("\n") + 2 return real_loops, allloops @@ -211,7 +225,7 @@ memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\('( (.*?))'", loop.content) + m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) if m is None: name = '?' loop.key = '?' 
@@ -236,7 +250,7 @@ content = loop.content loop.content = "Logfile at %d\n" % loop.startlineno + content loop.postprocess(loops, memo, counts) - + def postprocess(loops, allloops, counts): for loop in allloops: if isinstance(loop, Block): diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -111,6 +111,11 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) + at register_helper(annmodel.SomeBool()) +def box_isint(llbox): + from rpython.jit.metainterp.history import INT + return _cast_to_box(llbox).type == INT + # ------------------------- stats interface --------------------------- @register_helper(annmodel.SomeBool()) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -154,6 +154,15 @@ else: return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def statvfs(path): + if isinstance(path, str): + return os.statvfs(path) + else: + return os.statvfs(path.as_bytes()) + + @specialize.argtype(0) def unlink(path): if isinstance(path, str): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,6 +1698,18 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) + @registering_if(os, 'fstatvfs') + def register_os_fstatvfs(self): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + + if hasattr(os, 'statvfs'): + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) + + # ------------------------------- os.W* --------------------------------- w_star = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -2,20 +2,22 @@ and os.fstat(). In RPython like in plain Python the stat result can be indexed like a tuple but also exposes the st_xxx attributes. """ -import os, sys + +import os +import sys + from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_with_new_name, func_renamer -from rpython.rtyper import extregistry -from rpython.rtyper.extfunc import register_external, extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize +from rpython.rtyper import extregistry +from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.extfunc import extdef +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.tool import rffi_platform as platform +from rpython.tool.pairtype import pairtype +from rpython.tool.sourcetools import func_renamer from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.annlowlevel import hlstr # Support for float times is here. 
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve @@ -47,12 +49,26 @@ ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented - ] +] N_INDEXABLE_FIELDS = 10 # For OO backends, expose only the portable fields (the first 10). PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + # ____________________________________________________________ # # Annotation support @@ -79,6 +95,7 @@ def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7], st[8], st[9]) + def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) @@ -86,6 +103,26 @@ extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate + +class SomeStatvfsResult(annmodel.SomeObject): + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used + + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.module import r_os_stat + return r_os_stat.StatvfsResultRepr(rtyper) + + def rtyper_makekey_ex(self, rtyper): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return annmodel.lltype_to_annotation(TYPE) + + class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" @@ -94,7 +131,17 @@ name, TYPE = STAT_FIELDS[index] return annmodel.lltype_to_annotation(TYPE) + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return annmodel.lltype_to_annotation(TYPE) + + s_StatResult = SomeStatResult() +s_StatvfsResult = SomeStatvfsResult() + def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" @@ -104,6 +151,11 @@ kwds[name] = tup[N_INDEXABLE_FIELDS + i] return os.stat_result(positional, kwds) + +def make_statvfs_result(tup): + return os.statvfs_result(tup) + + class MakeStatResultEntry(extregistry.ExtRegistryEntry): _about_ = make_stat_result @@ -114,22 +166,33 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.specialize_make_stat_result(hop) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + from rpython.rtyper.module import r_os_stat + return r_os_stat.specialize_make_statvfs_result(hop) + # ____________________________________________________________ # # RFFI support if sys.platform.startswith('win'): _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'unistd.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( # This must be set to 64 on some 
systems to enable large file support. #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], # ^^^ nowadays it's always set in all C files we produce. - includes = INCLUDES + includes=INCLUDES ) if TIMESPEC is not None: @@ -141,7 +204,7 @@ def posix_declaration(try_to_add=None): - global STAT_STRUCT + global STAT_STRUCT, STATVFS_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] if try_to_add: @@ -173,15 +236,17 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + try: - config = platform.configure(CConfig, ignore_errors= - try_to_add is not None) + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) except platform.CompilationError: if try_to_add: return # failed to add this field, give up raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) if try_to_add: STAT_FIELDS.append(try_to_add) @@ -202,6 +267,9 @@ STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] del _name, _TYPE +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + def build_stat_result(st): # only for LL backends @@ -233,6 +301,21 @@ return make_stat_result(result) +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + def register_stat_variant(name, traits): if name != 'fstat': arg_is_path = True @@ -301,6 +384,56 @@ [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) + +def register_statvfs_variant(name, traits): + if name != 'fstatvfs': + arg_is_path = True + s_arg = traits.str0 + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + posix_mystatvfs = rffi.llexternal(name, + [ARG1, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info + ) + + @func_renamer('os_%s_llimpl' % (name,)) + def posix_statvfs_llimpl(arg): + stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') + try: + if arg_is_path: + arg = traits.str2charp(arg) + error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) + if arg_is_path: + traits.free_charp(arg) + if error != 0: + raise OSError(rposix.get_errno(), "os_?statvfs failed") + return build_statvfs_result(stresult) + finally: + lltype.free(stresult, flavor='raw') + + @func_renamer('os_%s_fake' % (name,)) + def posix_fakeimpl(arg): + if s_arg == traits.str0: + arg = hlstr(arg) + st = getattr(os, name)(arg) + fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] + TP = TUPLE_TYPE(fields) + ll_tup = lltype.malloc(TP.TO) + for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): + val = getattr(st, fieldname) + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) + return ll_tup + + return extdef( + [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl + ) + + def make_win32_stat_impl(name, traits): from rpython.rlib import rwin32 from rpython.rtyper.module.ll_win32file import make_win32_traits diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -67,3 +67,52 @@ # no-op conversion from r_StatResult.r_tuple to r_StatResult hop.exception_cannot_occur() return v_result + + +class 
StatvfsResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_fields = ll_os_stat.STATVFS_FIELDS + + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(self.statvfs_fields): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + for name, TYPE in self.statvfs_fields]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + hop2.forced_opname = 'getitem' + hop2.args_v = [hop2.args_v[0], Constant(index)] + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def specialize_make_statvfs_result(hop): + r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -46,6 +46,26 @@ data = getllimpl(os.getlogin)() assert data == expected +def test_statvfs(): + if not hasattr(os, 'statvfs'): + py.test.skip('posix specific function') + try: + expected = os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + data = getllimpl(os.statvfs)('.') + assert data == expected + +def test_fstatvfs(): + if not hasattr(os, 'fstatvfs'): + py.test.skip('posix specific function') + try: + expected = os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + data = getllimpl(os.fstatvfs)(0) + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -478,7 +478,7 @@ 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', - 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', + 'cvt', 'ucomi', 'comi', 'subs', 'subp', 'adds', 'addp', 'xorp', 'movap', 'movd', 'movlp', 'movup', 'sqrt', 'rsqrt', 'movhlp', 'movlhp', 'mins', 'minp', 'maxs', 'maxp', 'unpck', 'pxor', 'por', # sse2 'shufps', 'shufpd', @@ -495,13 +495,15 @@ # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers - 'movz', + 'movz', # locked operations should not move GC pointers, at least so far 'lock', 'pause', # non-temporal moves should be reserved for areas containing # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', - ]) + # bit manipulations + 'bextr', + ]) # a partial list is hopefully good enough for now; it's all to 
support # only one corner case, tested in elf64/track_zero.s @@ -741,7 +743,7 @@ # tail-calls are equivalent to RET for us return InsnRet(self.CALLEE_SAVE_REGISTERS) return InsnStop("jump") - + def register_jump_to(self, label, lastinsn=None): if lastinsn is None: lastinsn = self.insns[-1] @@ -1020,7 +1022,7 @@ visit_movl = visit_mov visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) - + visit_pushq = FunctionGcRootTracker._visit_push visit_addq = FunctionGcRootTracker._visit_add From noreply at buildbot.pypy.org Thu Jul 25 22:45:55 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:55 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: fix merge, typos Message-ID: <20130725204555.661421C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65671:7d2d7703eb8b Date: 2013-07-25 22:20 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2d7703eb8b/ Log: fix merge, typos diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,13 +32,12 @@ .. branch: ssl_moving_write_buffer -<<<<<<< local .. branch: pythoninspect-fix Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process to start interactive prompt when the script execution finishes. This adds new __pypy__.os.real_getenv call that bypasses Python cache and looksup env in the underlying OS. Translatorshell now works on PyPy. -======= + .. branch: add-statvfs Added os.statvfs and os.fstatvfs diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -710,7 +710,7 @@ try: import __pypy__ except ImportError: - py.test.skip("This can be only tested on PyPy with get_realenv") + py.test.skip("This can be only tested on PyPy with real_getenv") # should be noninteractive when piped in data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' From noreply at buildbot.pypy.org Thu Jul 25 22:45:56 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Thu, 25 Jul 2013 22:45:56 +0200 (CEST) Subject: [pypy-commit] pypy pythoninspect-fix: argh Message-ID: <20130725204556.A53D61C0149@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: pythoninspect-fix Changeset: r65672:e026c95e2f05 Date: 2013-07-25 22:25 +0200 http://bitbucket.org/pypy/pypy/changeset/e026c95e2f05/ Log: argh diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,4 +43,3 @@ .. branch: statvfs_tests Added some addition tests for statvfs. ->>>>>>> other From noreply at buildbot.pypy.org Thu Jul 25 22:45:57 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 25 Jul 2013 22:45:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in squeaky/pypy/pythoninspect-fix (pull request #168) Message-ID: <20130725204557.E1EDC1C0149@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65673:eec382ffd965 Date: 2013-07-25 13:45 -0700 http://bitbucket.org/pypy/pypy/changeset/eec382ffd965/ Log: Merged in squeaky/pypy/pythoninspect-fix (pull request #168) Fix PYTHONINSPECT behaviour diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,6 +32,12 @@ .. branch: ssl_moving_write_buffer +.. 
branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + .. branch: add-statvfs Added os.statvfs and os.fstatvfs diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -556,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. # + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script = getscript(""" @@ -706,6 +706,20 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +985,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. 
plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None From noreply at buildbot.pypy.org Thu Jul 25 23:53:45 2013 From: noreply at buildbot.pypy.org (matti) Date: Thu, 25 Jul 2013 23:53:45 +0200 (CEST) Subject: [pypy-commit] cffi windows: un-tabify, function call needs at least 40 bytes stack size Message-ID: <20130725215345.231C41C13FC@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1294:7cc661ada4a5 Date: 2013-07-25 23:34 +0300 http://bitbucket.org/cffi/cffi/changeset/7cc661ada4a5/ Log: un-tabify, function call needs at least 40 bytes stack size diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3987,8 +3987,13 @@ if (cif_descr != NULL) { /* exchange data size */ +#ifdef _WIN64 + cif_descr->exchange_size = exchange_offset<40? 
40 : exchange_offset; +#else cif_descr->exchange_size = exchange_offset; - } +#endif + } + return 0; } diff --git a/c/libffi_msvc/ffi.c b/c/libffi_msvc/ffi.c --- a/c/libffi_msvc/ffi.c +++ b/c/libffi_msvc/ffi.c @@ -62,46 +62,46 @@ /* Align if necessary */ if ((sizeof(void *) - 1) & (size_t) argp) - argp = (char *) ALIGN(argp, sizeof(void *)); + argp = (char *) ALIGN(argp, sizeof(void *)); z = (*p_arg)->size; if (z < sizeof(int)) - { - z = sizeof(int); - switch ((*p_arg)->type) - { - case FFI_TYPE_SINT8: - *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); - break; + { + z = sizeof(int); + switch ((*p_arg)->type) + { + case FFI_TYPE_SINT8: + *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv); + break; - case FFI_TYPE_UINT8: - *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); - break; + case FFI_TYPE_UINT8: + *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv); + break; - case FFI_TYPE_SINT16: - *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); - break; + case FFI_TYPE_SINT16: + *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv); + break; - case FFI_TYPE_UINT16: - *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); - break; + case FFI_TYPE_UINT16: + *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv); + break; - case FFI_TYPE_SINT32: - *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv); - break; + case FFI_TYPE_SINT32: + *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv); + break; - case FFI_TYPE_UINT32: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); - break; + case FFI_TYPE_UINT32: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; - case FFI_TYPE_STRUCT: - *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); - break; + case FFI_TYPE_STRUCT: + *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv); + break; - default: - FFI_ASSERT(0); - } - } + default: + FFI_ASSERT(0); + } + } #ifdef _WIN64 else if (z > 8) { @@ -112,9 +112,9 @@ } #endif else - { - memcpy(argp, *p_argv, z); - } + { + memcpy(argp, *p_argv, z); + } p_argv++; argp += z; } @@ -170,34 +170,34 @@ #ifdef _WIN32 extern int ffi_call_x86(void (*)(char *, extended_cif *), - /*@out@*/ extended_cif *, - unsigned, unsigned, - /*@out@*/ unsigned *, - void (*fn)()); + /*@out@*/ extended_cif *, + unsigned, unsigned, + /*@out@*/ unsigned *, + void (*fn)()); #endif #ifdef _WIN64 extern int ffi_call_AMD64(void (*)(char *, extended_cif *), - /*@out@*/ extended_cif *, - unsigned, unsigned, - /*@out@*/ unsigned *, - void (*fn)()); + /*@out@*/ extended_cif *, + unsigned, unsigned, + /*@out@*/ unsigned *, + void (*fn)()); #endif int ffi_call(/*@dependent@*/ ffi_cif *cif, - void (*fn)(), - /*@out@*/ void *rvalue, - /*@dependent@*/ void **avalue) + void (*fn)(), + /*@out@*/ void *rvalue, + /*@dependent@*/ void **avalue) { extended_cif ecif; ecif.cif = cif; ecif.avalue = avalue; - /* If the return value is a struct and we don't have a return */ - /* value address then we need to make one */ + /* If the return value is a struct and we don't have a return */ + /* value address then we need to make one */ if ((rvalue == NULL) && (cif->flags == FFI_TYPE_STRUCT)) @@ -216,14 +216,14 @@ case FFI_SYSV: case FFI_STDCALL: return ffi_call_x86(ffi_prep_args, &ecif, cif->bytes, - cif->flags, ecif.rvalue, fn); + cif->flags, ecif.rvalue, fn); break; #else case FFI_SYSV: /*@-usedef@*/ /* Function call needs at least 40 bytes stack size, on win64 AMD64 */ return ffi_call_AMD64(ffi_prep_args, &ecif, cif->bytes ? 
cif->bytes : 40, - cif->flags, ecif.rvalue, fn); + cif->flags, ecif.rvalue, fn); /*@=usedef@*/ break; #endif @@ -239,7 +239,7 @@ /** private members **/ static void ffi_prep_incoming_args_SYSV (char *stack, void **ret, - void** args, ffi_cif* cif); + void** args, ffi_cif* cif); /* This function is jumped to by the trampoline */ #ifdef _WIN64 @@ -279,19 +279,19 @@ /* now, do a generic return based on the value of rtype */ if (rtype == FFI_TYPE_INT) { - _asm mov eax, resp ; - _asm mov eax, [eax] ; + _asm mov eax, resp ; + _asm mov eax, [eax] ; } else if (rtype == FFI_TYPE_FLOAT) { - _asm mov eax, resp ; - _asm fld DWORD PTR [eax] ; + _asm mov eax, resp ; + _asm fld DWORD PTR [eax] ; // asm ("flds (%0)" : : "r" (resp) : "st" ); } else if (rtype == FFI_TYPE_DOUBLE) { - _asm mov eax, resp ; - _asm fld QWORD PTR [eax] ; + _asm mov eax, resp ; + _asm fld QWORD PTR [eax] ; // asm ("fldl (%0)" : : "r" (resp) : "st", "st(1)" ); } else if (rtype == FFI_TYPE_LONGDOUBLE) @@ -300,13 +300,13 @@ } else if (rtype == FFI_TYPE_SINT64) { - _asm mov edx, resp ; - _asm mov eax, [edx] ; - _asm mov edx, [edx + 4] ; + _asm mov edx, resp ; + _asm mov eax, [edx] ; + _asm mov edx, [edx + 4] ; // asm ("movl 0(%0),%%eax;" -// "movl 4(%0),%%edx" -// : : "r"(resp) -// : "eax", "edx"); +// "movl 4(%0),%%edx" +// : : "r"(resp) +// : "eax", "edx"); } #else /* now, do a generic return based on the value of rtype */ @@ -329,9 +329,9 @@ else if (rtype == FFI_TYPE_SINT64) { asm ("movl 0(%0),%%eax;" - "movl 4(%0),%%edx" - : : "r"(resp) - : "eax", "edx"); + "movl 4(%0),%%edx" + : : "r"(resp) + : "eax", "edx"); } #endif #endif @@ -348,7 +348,7 @@ /*@-exportheader@*/ static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, - void **avalue, ffi_cif *cif) + void **avalue, ffi_cif *cif) /*@=exportheader@*/ { register unsigned int i; @@ -371,7 +371,7 @@ /* Align if necessary */ if ((sizeof(char *) - 1) & (size_t) argp) { - argp = (char *) ALIGN(argp, sizeof(char*)); + argp = (char *) ALIGN(argp, sizeof(char*)); } z = (*p_arg)->size; @@ -392,10 +392,10 @@ ffi_status ffi_prep_closure_loc (ffi_closure* closure, - ffi_cif* cif, - void (*fun)(ffi_cif*,void*,void**,void*), - void *user_data, - void *codeloc) + ffi_cif* cif, + void (*fun)(ffi_cif*,void*,void**,void*), + void *user_data, + void *codeloc) { short bytes; char *tramp; @@ -441,7 +441,7 @@ /* 41 BB ---- mov r11d,mask */ BYTES("\x41\xBB"); INT(mask); - /* 48 B8 -------- mov rax, closure */ + /* 48 B8 -------- mov rax, closure */ BYTES("\x48\xB8"); POINTER(closure); /* 49 BA -------- mov r10, ffi_closure_OUTER */ From noreply at buildbot.pypy.org Thu Jul 25 23:53:46 2013 From: noreply at buildbot.pypy.org (matti) Date: Thu, 25 Jul 2013 23:53:46 +0200 (CEST) Subject: [pypy-commit] cffi windows: another place where function call needs at least 40 bytes stack size Message-ID: <20130725215346.558731C13FC@cobra.cs.uni-duesseldorf.de> Author: matti Branch: windows Changeset: r1295:55d1624ba4be Date: 2013-07-26 00:46 +0300 http://bitbucket.org/cffi/cffi/changeset/55d1624ba4be/ Log: another place where function call needs at least 40 bytes stack size diff --git a/c/libffi_msvc/prep_cif.c b/c/libffi_msvc/prep_cif.c --- a/c/libffi_msvc/prep_cif.c +++ b/c/libffi_msvc/prep_cif.c @@ -161,13 +161,16 @@ /* Add any padding if necessary */ if (((*ptr)->alignment - 1) & bytes) bytes = ALIGN(bytes, (*ptr)->alignment); - + #endif bytes += STACK_ARG_SIZE((*ptr)->size); } #endif } +#if defined(_MSC_VER) && defined(_WIN64) + bytes = bytes < 40 ? 
40 : bytes; +#endif cif->bytes = bytes; /* Perform machine dependent cif processing */ From noreply at buildbot.pypy.org Fri Jul 26 02:03:15 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 26 Jul 2013 02:03:15 +0200 (CEST) Subject: [pypy-commit] pypy default: add a wrap_dlopenerror for consistency's sake Message-ID: <20130726000315.E147C1C142B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65674:29a9e38aedb6 Date: 2013-07-25 16:26 -0700 http://bitbucket.org/pypy/pypy/changeset/29a9e38aedb6/ Log: add a wrap_dlopenerror for consistency's sake diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -324,8 +324,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -140,6 +140,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -219,8 +224,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError, e: print e - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: 
raise AssertionError("did not fail??") From noreply at buildbot.pypy.org Fri Jul 26 02:03:18 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 26 Jul 2013 02:03:18 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130726000318.142741C142B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65675:c8db1997988c Date: 2013-07-25 16:40 -0700 http://bitbucket.org/pypy/pypy/changeset/c8db1997988c/ Log: merge default diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,62 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +Highlights +========== + +* Support for os.statvfs and os.fstatvfs on unix systems. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. 
Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,15 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -550,8 +550,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. 
# + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -58,7 +58,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) def pytest_funcarg__demo_script(request): @@ -768,6 +768,20 @@ expect_prompt=True, expect_banner=False) assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -1090,7 +1104,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from 
pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -325,8 +325,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -141,6 +141,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -220,8 +225,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError as e: print(e) - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: raise AssertionError("did not fail??") diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 
'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,18 +33,18 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'urandom': 'app_posix.urandom', - 'statvfs_result': 'app_posix.statvfs_result', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'urandom': 'app_posix.urandom', } if os.name == 'nt': del appleveldefs['urandom'] # at interp on win32 appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -51,45 +52,47 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdb' : 'interp_posix.getcwdb', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', - 'device_encoding' : 'interp_posix.device_encoding', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdb': 'interp_posix.getcwdb', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 
'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', + 'device_encoding': 'interp_posix.device_encoding', } if hasattr(os, 'chown'): @@ -166,9 +169,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -176,7 +179,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -185,7 +188,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -193,7 +196,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,14 +1,16 @@ -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -203,6 +205,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -243,6 +246,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a 
stat system call on the file referenced to by an open @@ -304,6 +317,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -168,7 +168,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -182,6 +183,16 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,6 +125,9 @@ self.llbox = llbox def descr_getint(self, space): + if not jit_hooks.box_isint(self.llbox): + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) @@ -182,7 +185,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +240,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +351,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, 
JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.__code__, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num + assert int_add.offset == 0 self.on_compile_bridge() expected = ('>' % repr(self.f.__code__)) @@ -161,6 +165,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -735,6 +735,10 @@ self.mc.RET() def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in ebx, and returns an integer + that gives the address of the stack top. If this integer doesn't + fit in 32 bits, it will be loaded in r11. + """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] @@ -752,6 +756,9 @@ if rx86.fits_in_32bits(rst): self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: + # The integer 'rst' doesn't fit in 32 bits, so we know that + # _load_shadowstack_top_in_ebx() above loaded it in r11. + # Reuse it. Be careful not to overwrite r11 in the middle! 
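The comment above relies on _load_shadowstack_top_in_ebx() spilling the root-stack-top address into r11 only when it does not fit a signed 32-bit displacement. A rough standalone sketch of that range check (the sample addresses are illustrative, not values taken from the backend):

    def fits_in_32bits(value):
        # x86-64 memory operands carry only a signed 32-bit displacement, so an
        # absolute address outside this range must first be loaded into a
        # scratch register (r11 here), which then must not be clobbered before
        # it is reused to store ebx back.
        return -2147483648 <= value <= 2147483647

    for addr in (0x7ffffff0, 0x123456789):
        mode = "MOV reg, [addr]" if fits_in_32bits(addr) else "go through r11"
        print(hex(addr), "->", mode)
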
self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), ebx.value) # MOV [r11], ebx diff --git a/rpython/jit/tool/test/f.pypylog.bz2 b/rpython/jit/tool/test/f.pypylog.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a982e459b1daa33547576733ccc0b560f99a3f79 GIT binary patch [cut] diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,7 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color + split_one_loop, postprocess, main, get_gradient_color, guard_number def test_gradient_color(): @@ -30,6 +30,20 @@ loops = splitloops(data) assert len(loops) == 2 + def test_no_of_loops_hexguards(self): + data = [preparse(""" + # Loop 0 : loop with 39 ops + debug_merge_point('', 0) + guard_class(p4, 141310752, descr=) [p0, p1] + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), preparse(""" + # Loop 1 : loop with 46 ops + p21 = getfield_gc(p4, descr=) + """)] + loops = splitloops(data) + assert len(loops) == 2 + def test_split_one_loop(self): real_loops = [FinalBlock(preparse(""" p21 = getfield_gc(p4, descr=) @@ -50,12 +64,42 @@ assert loop.left.content == '' assert loop.right.content == 'extra' + def test_split_one_loop_hexguards(self): + real_loops = [FinalBlock(preparse(""" + p21 = getfield_gc(p4, descr=) + guard_class(p4, 141310752, descr=) [p0, p1] + """), None), FinalBlock(preparse(""" + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), None)] + real_loops[0].loop_no = 0 + real_loops[1].loop_no = 1 + allloops = real_loops[:] + split_one_loop(real_loops, 'Guard0x10abcdef0', 'extra', 1, guard_number(("0x10abcdef0", "0x")), allloops) + loop = real_loops[1] + assert isinstance(loop, Block) + assert loop.content.endswith('p1]') + loop.left = allloops[loop.left] + loop.right = allloops[loop.right] + assert loop.left.content == '' + assert loop.right.content == 'extra' + def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP', 0)", None)] postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") + def test_postparse_new(self): + real_loops = [FinalBlock("debug_merge_point(0, 0, ' #351 LOAD_FAST')", None)] + postprocess(real_loops, real_loops[:], {}) + assert real_loops[0].header.startswith("_optimize_charset. file '/usr/local/Cellar/pypy/2.0-beta2/lib-python/2.7/sre_compile.py'. 
line 207") + def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') main(str(fname), False, view=False) # assert did not explode + + def test_load_actual_f(self): + fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') + main(str(fname), False, view=False) + # assert did not explode diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -56,6 +56,18 @@ BOX_COLOR = (128, 0, 96) +GUARDNO_RE = "((0x)?[\da-f]+)" +def guard_number(guardno_match): + if (len(guardno_match) == 1 # ("12354",) + or guardno_match[1] != "0x" # ("12345", None) + ): + return int(guardno_match[0]) + else: # ("0x12ef", "0x") + return int(guardno_match[0], 16) + +def guard_number_string(guardno_match): + return guardno_match[0] # its always the first group + class BasicBlock(object): counter = 0 startlineno = 0 @@ -85,13 +97,15 @@ def set_content(self, content): self._content = content - groups = re.findall('Guard(\d+)', content) + groups = re.findall('Guard' + GUARDNO_RE, content) if not groups: self.first_guard = -1 self.last_guard = -1 else: - self.first_guard = int(groups[0]) - self.last_guard = int(groups[-1]) + # guards can be out of order nowadays + groups = sorted(groups) + self.first_guard = guard_number(groups[0]) + self.last_guard = guard_number(groups[-1]) content = property(get_content, set_content) @@ -197,11 +211,11 @@ _loop.loop_no = no allloops.append(_loop) else: - m = re.search("bridge out of Guard (\d+)", firstline) + m = re.search("bridge out of Guard " + GUARDNO_RE, firstline) assert m - guard_s = 'Guard' + m.group(1) + guard_s = 'Guard' + guard_number_string(m.groups()) split_one_loop(real_loops, guard_s, loop, counter, - int(m.group(1)), allloops) + guard_number(m.groups()), allloops) counter += loop.count("\n") + 2 return real_loops, allloops @@ -211,7 +225,7 @@ memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\('( (.*?))'", loop.content) + m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) if m is None: name = '?' loop.key = '?' 
@@ -236,7 +250,7 @@ content = loop.content loop.content = "Logfile at %d\n" % loop.startlineno + content loop.postprocess(loops, memo, counts) - + def postprocess(loops, allloops, counts): for loop in allloops: if isinstance(loop, Block): diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -111,6 +111,11 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) + at register_helper(annmodel.SomeBool()) +def box_isint(llbox): + from rpython.jit.metainterp.history import INT + return _cast_to_box(llbox).type == INT + # ------------------------- stats interface --------------------------- @register_helper(annmodel.SomeBool()) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -154,6 +154,15 @@ else: return os.lstat(path.as_bytes()) + + at specialize.argtype(0) +def statvfs(path): + if isinstance(path, str): + return os.statvfs(path) + else: + return os.statvfs(path.as_bytes()) + + @specialize.argtype(0) def unlink(path): if isinstance(path, str): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1698,6 +1698,18 @@ from rpython.rtyper.module import ll_os_stat return ll_os_stat.register_stat_variant('lstat', traits) + @registering_if(os, 'fstatvfs') + def register_os_fstatvfs(self): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('fstatvfs', StringTraits()) + + if hasattr(os, 'statvfs'): + @registering_str_unicode(os.statvfs) + def register_os_statvfs(self, traits): + from rpython.rtyper.module import ll_os_stat + return ll_os_stat.register_statvfs_variant('statvfs', traits) + + # ------------------------------- os.W* --------------------------------- w_star = ['WCOREDUMP', 'WIFCONTINUED', 'WIFSTOPPED', diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -2,20 +2,22 @@ and os.fstat(). In RPython like in plain Python the stat result can be indexed like a tuple but also exposes the st_xxx attributes. """ -import os, sys + +import os +import sys + from rpython.annotator import model as annmodel -from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import func_with_new_name, func_renamer -from rpython.rtyper import extregistry -from rpython.rtyper.extfunc import register_external, extdef -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rtyper.tool import rffi_platform as platform -from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask -from rpython.rlib.objectmodel import specialize +from rpython.rtyper import extregistry +from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.extfunc import extdef +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.rtupletype import TUPLE_TYPE +from rpython.rtyper.tool import rffi_platform as platform +from rpython.tool.pairtype import pairtype +from rpython.tool.sourcetools import func_renamer from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.annlowlevel import hlstr # Support for float times is here. 
# - ALL_STAT_FIELDS contains Float fields if the system can retrieve @@ -47,12 +49,26 @@ ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented - ] +] N_INDEXABLE_FIELDS = 10 # For OO backends, expose only the portable fields (the first 10). PORTABLE_STAT_FIELDS = ALL_STAT_FIELDS[:N_INDEXABLE_FIELDS] +STATVFS_FIELDS = [ + ("f_bsize", lltype.Signed), + ("f_frsize", lltype.Signed), + ("f_blocks", lltype.Signed), + ("f_bfree", lltype.Signed), + ("f_bavail", lltype.Signed), + ("f_files", lltype.Signed), + ("f_ffree", lltype.Signed), + ("f_favail", lltype.Signed), + ("f_flag", lltype.Signed), + ("f_namemax", lltype.Signed), +] + + # ____________________________________________________________ # # Annotation support @@ -79,6 +95,7 @@ def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], st[5], st[6], st[7], st[8], st[9]) + def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) @@ -86,6 +103,26 @@ extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate + +class SomeStatvfsResult(annmodel.SomeObject): + if hasattr(os, 'statvfs_result'): + knowntype = os.statvfs_result + else: + knowntype = None # will not be used + + def rtyper_makerepr(self, rtyper): + from rpython.rtyper.module import r_os_stat + return r_os_stat.StatvfsResultRepr(rtyper) + + def rtyper_makekey_ex(self, rtyper): + return self.__class__, + + def getattr(self, s_attr): + assert s_attr.is_constant() + TYPE = STATVFS_FIELD_TYPES[s_attr.const] + return annmodel.lltype_to_annotation(TYPE) + + class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" @@ -94,7 +131,17 @@ name, TYPE = STAT_FIELDS[index] return annmodel.lltype_to_annotation(TYPE) + +class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): + def getitem((s_stat, s_int)): + assert s_int.is_constant() + name, TYPE = STATVFS_FIELDS[s_int.const] + return annmodel.lltype_to_annotation(TYPE) + + s_StatResult = SomeStatResult() +s_StatvfsResult = SomeStatvfsResult() + def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" @@ -104,6 +151,11 @@ kwds[name] = tup[N_INDEXABLE_FIELDS + i] return os.stat_result(positional, kwds) + +def make_statvfs_result(tup): + return os.statvfs_result(tup) + + class MakeStatResultEntry(extregistry.ExtRegistryEntry): _about_ = make_stat_result @@ -114,22 +166,33 @@ from rpython.rtyper.module import r_os_stat return r_os_stat.specialize_make_stat_result(hop) + +class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_statvfs_result + + def compute_result_annotation(self, s_tup): + return s_StatvfsResult + + def specialize_call(self, hop): + from rpython.rtyper.module import r_os_stat + return r_os_stat.specialize_make_statvfs_result(hop) + # ____________________________________________________________ # # RFFI support if sys.platform.startswith('win'): _name_struct_stat = '_stati64' - INCLUDES = ['sys/types.h', 'sys/stat.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h'] else: _name_struct_stat = 'stat' - INCLUDES = ['sys/types.h', 'sys/stat.h', 'unistd.h'] + INCLUDES = ['sys/types.h', 'sys/stat.h', 'sys/statvfs.h', 'unistd.h'] compilation_info = ExternalCompilationInfo( # This must be set to 64 on some 
systems to enable large file support. #pre_include_bits = ['#define _FILE_OFFSET_BITS 64'], # ^^^ nowadays it's always set in all C files we produce. - includes = INCLUDES + includes=INCLUDES ) if TIMESPEC is not None: @@ -141,7 +204,7 @@ def posix_declaration(try_to_add=None): - global STAT_STRUCT + global STAT_STRUCT, STATVFS_STRUCT LL_STAT_FIELDS = STAT_FIELDS[:] if try_to_add: @@ -173,15 +236,17 @@ class CConfig: _compilation_info_ = compilation_info STAT_STRUCT = platform.Struct('struct %s' % _name_struct_stat, LL_STAT_FIELDS) + STATVFS_STRUCT = platform.Struct('struct statvfs', STATVFS_FIELDS) + try: - config = platform.configure(CConfig, ignore_errors= - try_to_add is not None) + config = platform.configure(CConfig, ignore_errors=try_to_add is not None) except platform.CompilationError: if try_to_add: return # failed to add this field, give up raise STAT_STRUCT = lltype.Ptr(config['STAT_STRUCT']) + STATVFS_STRUCT = lltype.Ptr(config['STATVFS_STRUCT']) if try_to_add: STAT_FIELDS.append(try_to_add) @@ -202,6 +267,9 @@ STAT_FIELD_NAMES = [_name for (_name, _TYPE) in STAT_FIELDS] del _name, _TYPE +STATVFS_FIELD_TYPES = dict(STATVFS_FIELDS) +STATVFS_FIELD_NAMES = [name for name, tp in STATVFS_FIELDS] + def build_stat_result(st): # only for LL backends @@ -233,6 +301,21 @@ return make_stat_result(result) +def build_statvfs_result(st): + return make_statvfs_result(( + st.c_f_bsize, + st.c_f_frsize, + st.c_f_blocks, + st.c_f_bfree, + st.c_f_bavail, + st.c_f_files, + st.c_f_ffree, + st.c_f_favail, + st.c_f_flag, + st.c_f_namemax + )) + + def register_stat_variant(name, traits): if name != 'fstat': arg_is_path = True @@ -301,6 +384,56 @@ [s_arg], s_StatResult, "ll_os.ll_os_%s" % (name,), llimpl=posix_stat_llimpl, llfakeimpl=posix_fakeimpl) + +def register_statvfs_variant(name, traits): + if name != 'fstatvfs': + arg_is_path = True + s_arg = traits.str0 + ARG1 = traits.CCHARP + else: + arg_is_path = False + s_arg = int + ARG1 = rffi.INT + + posix_mystatvfs = rffi.llexternal(name, + [ARG1, STATVFS_STRUCT], rffi.INT, + compilation_info=compilation_info + ) + + @func_renamer('os_%s_llimpl' % (name,)) + def posix_statvfs_llimpl(arg): + stresult = lltype.malloc(STATVFS_STRUCT.TO, flavor='raw') + try: + if arg_is_path: + arg = traits.str2charp(arg) + error = rffi.cast(rffi.LONG, posix_mystatvfs(arg, stresult)) + if arg_is_path: + traits.free_charp(arg) + if error != 0: + raise OSError(rposix.get_errno(), "os_?statvfs failed") + return build_statvfs_result(stresult) + finally: + lltype.free(stresult, flavor='raw') + + @func_renamer('os_%s_fake' % (name,)) + def posix_fakeimpl(arg): + if s_arg == traits.str0: + arg = hlstr(arg) + st = getattr(os, name)(arg) + fields = [TYPE for fieldname, TYPE in STATVFS_FIELDS] + TP = TUPLE_TYPE(fields) + ll_tup = lltype.malloc(TP.TO) + for i, (fieldname, TYPE) in enumerate(STATVFS_FIELDS): + val = getattr(st, fieldname) + rffi.setintfield(ll_tup, 'item%d' % i, int(val)) + return ll_tup + + return extdef( + [s_arg], s_StatvfsResult, "ll_os.ll_os_%s" % (name,), + llimpl=posix_statvfs_llimpl, llfakeimpl=posix_fakeimpl + ) + + def make_win32_stat_impl(name, traits): from rpython.rlib import rwin32 from rpython.rtyper.module.ll_win32file import make_win32_traits diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -67,3 +67,52 @@ # no-op conversion from r_StatResult.r_tuple to r_StatResult hop.exception_cannot_occur() return v_result + + +class 
StatvfsResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_fields = ll_os_stat.STATVFS_FIELDS + + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(self.statvfs_fields): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + for name, TYPE in self.statvfs_fields]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + hop2.forced_opname = 'getitem' + hop2.args_v = [hop2.args_v[0], Constant(index)] + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + + +def specialize_make_statvfs_result(hop): + r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -46,6 +46,26 @@ data = getllimpl(os.getlogin)() assert data == expected +def test_statvfs(): + if not hasattr(os, 'statvfs'): + py.test.skip('posix specific function') + try: + expected = os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + data = getllimpl(os.statvfs)('.') + assert data == expected + +def test_fstatvfs(): + if not hasattr(os, 'fstatvfs'): + py.test.skip('posix specific function') + try: + expected = os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + data = getllimpl(os.fstatvfs)(0) + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -478,7 +478,7 @@ 'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch', # floating-point operations cannot produce GC pointers 'f', - 'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp', + 'cvt', 'ucomi', 'comi', 'subs', 'subp', 'adds', 'addp', 'xorp', 'movap', 'movd', 'movlp', 'movup', 'sqrt', 'rsqrt', 'movhlp', 'movlhp', 'mins', 'minp', 'maxs', 'maxp', 'unpck', 'pxor', 'por', # sse2 'shufps', 'shufpd', @@ -495,13 +495,15 @@ # sign-extending moves should not produce GC pointers 'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto', # zero-extending moves should not produce GC pointers - 'movz', + 'movz', # locked operations should not move GC pointers, at least so far 'lock', 'pause', # non-temporal moves should be reserved for areas containing # raw data, not GC pointers 'movnt', 'mfence', 'lfence', 'sfence', - ]) + # bit manipulations + 'bextr', + ]) # a partial list is hopefully good enough for now; it's all to 
support # only one corner case, tested in elf64/track_zero.s @@ -741,7 +743,7 @@ # tail-calls are equivalent to RET for us return InsnRet(self.CALLEE_SAVE_REGISTERS) return InsnStop("jump") - + def register_jump_to(self, label, lastinsn=None): if lastinsn is None: lastinsn = self.insns[-1] @@ -1020,7 +1022,7 @@ visit_movl = visit_mov visit_xorl = _maybe_32bit_dest(FunctionGcRootTracker.binary_insn) - + visit_pushq = FunctionGcRootTracker._visit_push visit_addq = FunctionGcRootTracker._visit_add From noreply at buildbot.pypy.org Fri Jul 26 02:03:19 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 26 Jul 2013 02:03:19 +0200 (CEST) Subject: [pypy-commit] pypy py3k: now bytes on py3 Message-ID: <20130726000319.65B8C1C142B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65676:d962b5d6ed78 Date: 2013-07-25 17:01 -0700 http://bitbucket.org/pypy/pypy/changeset/d962b5d6ed78/ Log: now bytes on py3 diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -119,7 +119,7 @@ def test_strings(self): # first, try automatic conversion from applevel - self.check(app_types.char_p, self.space.wrap('foo'), 'foo') + self.check(app_types.char_p, self.space.wrapbytes('foo'), 'foo') self.check(app_types.unichar_p, self.space.wrap(u'foo\u1234'), u'foo\u1234') self.check(app_types.unichar_p, self.space.wrap('foo'), u'foo') # then, try to pass explicit pointers From noreply at buildbot.pypy.org Fri Jul 26 02:03:23 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 26 Jul 2013 02:03:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: path of least resistance: workaround dlerror returning junk messages under Message-ID: <20130726000323.188921C142B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65677:191650bed89e Date: 2013-07-25 17:02 -0700 http://bitbucket.org/pypy/pypy/changeset/191650bed89e/ Log: path of least resistance: workaround dlerror returning junk messages under ll2ctypes on the buildbot diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -5,6 +5,7 @@ from pypy.objspace.std.stringtype import getbytevalue from rpython.rlib.clibffi import * +from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.rlib.unroll import unrolling_iterable @@ -142,7 +143,13 @@ space.wrap("not supported by libffi")) def wrap_dlopenerror(space, e, filename): - msg = e.msg if e.msg else 'unspecified error' + if e.msg: + # dlerror can return garbage messages under ll2ctypes (not + # we_are_translated()), so repr it to avoid potential problems + # converting to unicode later + msg = e.msg if we_are_translated() else repr(e.msg) + else: + msg = 'unspecified error' return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', filename, msg) From noreply at buildbot.pypy.org Fri Jul 26 07:34:35 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 26 Jul 2013 07:34:35 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: branch for the pypy3 2.1.0 release Message-ID: <20130726053435.79C5A1C0149@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65678:1fc106b34e94 Date: 2013-07-25 22:32 -0700 
http://bitbucket.org/pypy/pypy/changeset/1fc106b34e94/ Log: branch for the pypy3 2.1.0 release diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.0.2' +release = '2.1.0-beta1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/release-pypy3-2.1.0-beta1.rst b/pypy/doc/release-pypy3-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.1.0-beta1.rst @@ -0,0 +1,60 @@ +================== +PyPy3 2.1.0 beta 1 +================== + +We're pleased to announce the first beta of the upcoming 2.1.0 release of +PyPy3. This is the first release of PyPy which targets Python 3.2 +compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this and future releases. + +You can download the PyPy3 2.1.0 beta 1 release here: + + http://pypy.org/download.html + +Highlights +========== + +* The first release of PyPy3: support for Python 3, targetting CPython 3.2.3! + Albeit with a few missing features: + + - The stdlib test_memoryview includes some failing tests (marked to + skip) and test_multiprocessing is known to deadlock on some + platforms + + - There are some known performance regressions (issues `#1540`_ & + `#1541`_) slated to be resolved before the final release + + - NumPyPy is currently disabled + +What is PyPy3? +============== + +PyPy3 is a very compliant Python interpreter, almost a drop-in replacement for +CPython 3.2.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. However Windows 32 support could use some improvement. + +Windows 64 work is still stalling and we would welcome a volunteer to handle +that. + +How to use PyPy3? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`#1540`: https://bugs.pypy.org/issue1540 +.. _`#1541`: https://bugs.pypy.org/issue1541 +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst b/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst @@ -0,0 +1,3 @@ +========================= +What's new in PyPy3 2.1.0 +========================= diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "3.2.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-alpha0" +#define PYPY_VERSION "2.1.0-beta1" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 1, 0, "beta", 1) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Fri Jul 26 09:55:24 2013 From: noreply at buildbot.pypy.org (bivab) Date: Fri, 26 Jul 2013 09:55:24 +0200 (CEST) Subject: [pypy-commit] pypy default: add the download link Message-ID: <20130726075524.09B991C002A@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65679:bf0099746fc3 Date: 2013-07-26 09:54 +0200 http://bitbucket.org/pypy/pypy/changeset/bf0099746fc3/ Log: add the download link diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -5,6 +5,10 @@ We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + Highlights ========== From noreply at buildbot.pypy.org Fri Jul 26 10:02:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 10:02:47 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: in-progress Message-ID: <20130726080247.7AA2D1C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r443:0dafc3959414 Date: 2013-07-25 22:51 +0200 http://bitbucket.org/pypy/stmgc/changeset/0dafc3959414/ Log: in-progress diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -70,6 +70,10 @@ void stm_free(void *p, size_t sz) { + if (p == NULL) { + assert(sz == 0); + return; + } assert(((intptr_t)((char *)p + sz) & (PAGE_SIZE-1)) == 0); size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; @@ -83,6 +87,14 @@ _stm_dbgmem(p, sz, PROT_NONE); } +void *stm_realloc(void *p, size_t newsz, size_t oldsz) +{ + void *r = stm_malloc(newsz); + memcpy(r, p, oldsz < newsz ? 
oldsz : newsz); + stm_free(p, oldsz); + return r; +} + int _stm_can_access_memory(char *p) { long base = ((char *)p - zone_start) / PAGE_SIZE; diff --git a/c4/dbgmem.h b/c4/dbgmem.h --- a/c4/dbgmem.h +++ b/c4/dbgmem.h @@ -6,6 +6,7 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); +void *stm_realloc(void *, size_t, size_t); int _stm_can_access_memory(char *); void assert_cleared(char *, size_t); @@ -13,6 +14,7 @@ #define stm_malloc(sz) malloc(sz) #define stm_free(p,sz) free(p) +#define stm_realloc(p,newsz,oldsz) realloc(p,newsz) #define assert_cleared(p,sz) do { } while(0) #endif diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -215,6 +215,7 @@ static gcptr copy_over_original(gcptr obj, gcptr id_copy) { assert(obj != id_copy); + assert(id_copy == (gcptr)obj->h_original); assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ /* check a few flags */ diff --git a/c4/lists.c b/c4/lists.c --- a/c4/lists.c +++ b/c4/lists.c @@ -18,7 +18,7 @@ void g2l_delete(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); memset(g2l, 0, sizeof(struct G2L)); } @@ -56,7 +56,7 @@ long alloc = g2l->raw_end - g2l->raw_start; long newalloc = (alloc + extra + (alloc >> 2) + 31) & ~15; //fprintf(stderr, "growth: %ld\n", newalloc); - char *newitems = malloc(newalloc); + char *newitems = stm_malloc(newalloc); newg2l.raw_start = newitems; newg2l.raw_current = newitems; newg2l.raw_end = newitems + newalloc; @@ -65,7 +65,7 @@ { g2l_insert(&newg2l, item->addr, item->val); } G2L_LOOP_END; - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); *g2l = newg2l; } @@ -151,7 +151,7 @@ //fprintf(stderr, "list %p deleted (%ld KB)\n", //gcptrlist, gcptrlist->alloc * sizeof(gcptr) / 1024); gcptrlist->size = 0; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = NULL; gcptrlist->alloc = 0; } @@ -162,7 +162,8 @@ return; size_t nsize = gcptrlist->size * sizeof(gcptr); - gcptr *newitems = realloc(gcptrlist->items, nsize); + gcptr *newitems = stm_realloc(gcptrlist->items, nsize, + gcptrlist->alloc * sizeof(gcptr)); if (newitems != NULL || nsize == 0) { gcptrlist->items = newitems; @@ -177,11 +178,11 @@ //fprintf(stderr, "list %p growth to %ld items (%ld KB)\n", // gcptrlist, newalloc, newalloc * sizeof(gcptr) / 1024); - gcptr *newitems = malloc(newalloc * sizeof(gcptr)); + gcptr *newitems = stm_malloc(newalloc * sizeof(gcptr)); long i; for (i=0; isize; i++) newitems[i] = gcptrlist->items[i]; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = newitems; gcptrlist->alloc = newalloc; } diff --git a/c4/lists.h b/c4/lists.h --- a/c4/lists.h +++ b/c4/lists.h @@ -1,6 +1,8 @@ #ifndef _SRCSTM_LISTS_H #define _SRCSTM_LISTS_H +#include "dbgmem.h" + /************************************************************/ /* The g2l_xx functions ("global_to_local") are implemented as a tree, @@ -36,7 +38,7 @@ void g2l_clear(struct G2L *g2l); void g2l_delete(struct G2L *g2l); static inline void g2l_delete_not_used_any_more(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); } static inline int g2l_any_entry(struct G2L *g2l) { diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -477,9 +477,12 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() major_collect() - lib.stm_pop_root() + p2b = 
lib.stm_pop_root() + check_not_free(p2b) + check_not_free(p1) p1b = lib.stm_read_barrier(p1) check_not_free(p1b) + assert p1 != p1b and p1b != p2 and p2 != p1 def test_big_old_object(): for words in range(80): From noreply at buildbot.pypy.org Fri Jul 26 10:02:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 10:02:48 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Found the source of the bug Message-ID: <20130726080248.A50AD1C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r444:1726fde1a4ff Date: 2013-07-25 23:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/1726fde1a4ff/ Log: Found the source of the bug diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -238,7 +238,8 @@ gcptr pseudoprebuilt(size_t size, int tid) { - gcptr x = calloc(1, size); + gcptr x = stm_malloc(size); + memset(x, 0, size); x->h_tid = PREBUILT_FLAGS | tid; x->h_revision = PREBUILT_REVISION; return x; From noreply at buildbot.pypy.org Fri Jul 26 10:02:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 10:02:49 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix (argh), by adding a reasonable assert, and fix all tests to pass this condition. Message-ID: <20130726080249.E7D601C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r445:7cf82d665852 Date: 2013-07-25 23:19 +0200 http://bitbucket.org/pypy/stmgc/changeset/7cf82d665852/ Log: Fix (argh), by adding a reasonable assert, and fix all tests to pass this condition. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -569,6 +569,12 @@ gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert(stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); + /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a + risk of overrunning the object later in gcpage.c when copying a stub + over it. However such objects are so small that they contain no field + at all, and so no write barrier should occur on them. 
*/ + if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -586,6 +586,7 @@ def delegate(p1, p2): assert classify(p1) == "public" assert classify(p2) == "public" + assert lib.gettid(p1) != 42 and lib.gettid(p2) == lib.gettid(p1) p1.h_revision = ffi.cast("revision_t", p2) p1.h_tid |= GCFLAG_PUBLIC_TO_PRIVATE if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -159,7 +159,7 @@ lib.stm_pop_root() def test_local_copy_from_global_obj(): - p1 = oalloc(HDR); make_public(p1) + p1 = oalloc(HDR + WORD); make_public(p1) p2n = lib.stm_write_barrier(p1) assert p2n != p1 assert lib.stm_write_barrier(p1) == p2n @@ -184,8 +184,8 @@ assert p3 == p2 def test_new_version(): - p1 = oalloc(HDR); make_public(p1) - p2 = oalloc(HDR); make_public(p2) + p1 = oalloc(HDR + WORD); make_public(p1) + p2 = oalloc(HDR + WORD); make_public(p2) delegate(p1, p2) check_not_free(p1) check_not_free(p2) @@ -214,10 +214,10 @@ def test_new_version_kill_intermediate(): - p1 = oalloc(HDR); make_public(p1) - p2 = oalloc(HDR); make_public(p2) - p3 = oalloc(HDR); make_public(p3) - p4 = oalloc(HDR); make_public(p4) + p1 = oalloc(HDR + WORD); make_public(p1) + p2 = oalloc(HDR + WORD); make_public(p2) + p3 = oalloc(HDR + WORD); make_public(p3) + p4 = oalloc(HDR + WORD); make_public(p4) delegate(p1, p2) delegate(p2, p3) delegate(p3, p4) @@ -318,7 +318,7 @@ check_free_old(p3) def test_prebuilt_version_to_protected(): - p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p1 = lib.pseudoprebuilt(HDR + WORD, 42 + HDR + WORD) p2 = lib.stm_write_barrier(p1) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -366,7 +366,7 @@ check_not_free(p1) def test_private_from_protected_young(): - p1 = nalloc(HDR) + p1 = nalloc(HDR + WORD) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() p1b = lib.stm_write_barrier(p1) @@ -384,7 +384,7 @@ assert follow_revision(p1).h_tid & GCFLAG_BACKUP_COPY def test_backup_stolen(): - p = palloc(HDR) + p = palloc(HDR + WORD) def f1(r): p1 = lib.stm_write_barrier(p) # private copy lib.stm_push_root(p1) @@ -429,7 +429,7 @@ run_parallel(f1, f2) def test_private_from_protected_inevitable(): - p1 = nalloc(HDR) + p1 = nalloc(HDR + WORD) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() p1b = lib.stm_write_barrier(p1) @@ -452,7 +452,7 @@ check_not_free(lib.getptr(p1, 0)) def test_prebuilt_modified_during_transaction(): - p1 = palloc(HDR) + p1 = palloc(HDR + WORD) p2 = nalloc_refs(1) lib.setptr(p2, 0, p1) lib.stm_push_root(p2) @@ -466,7 +466,7 @@ check_not_free(p1b) def test_prebuilt_modified_later(): - p1 = palloc(HDR) + p1 = palloc(HDR + WORD) p2 = nalloc_refs(1) lib.setptr(p2, 0, p1) lib.stm_push_root(p2) @@ -490,7 +490,7 @@ # assert did not crash def test_big_old_object_free(): - for words in range(80): + for words in range(1, 80): p1 = oalloc(HDR + words * WORD) p1b = lib.stm_write_barrier(p1) assert p1b == p1 From noreply at buildbot.pypy.org Fri Jul 26 10:02:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 10:02:51 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: More fixes. 
Message-ID: <20130726080251.080551C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r446:0d0ab39425d7 Date: 2013-07-26 10:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/0d0ab39425d7/ Log: More fixes. diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -241,8 +241,10 @@ size_t objsize; if (obj->h_tid & GCFLAG_STUB) objsize = sizeof(struct stm_stub_s); - else + else { objsize = stmgc_size(obj); + assert(objsize > sizeof(struct stm_stub_s) - WORD); + } dprintf(("copy %p over %p (%ld bytes)\n", obj, id_copy, objsize)); memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -586,7 +586,7 @@ def delegate(p1, p2): assert classify(p1) == "public" assert classify(p2) == "public" - assert lib.gettid(p1) != 42 and lib.gettid(p2) == lib.gettid(p1) + assert lib.gettid(p1) != 42 + HDR and lib.gettid(p2) == lib.gettid(p1) p1.h_revision = ffi.cast("revision_t", p2) p1.h_tid |= GCFLAG_PUBLIC_TO_PRIVATE if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -201,8 +201,8 @@ assert p3 == lib.stm_write_barrier(p1) def test_new_version_id_alive(): - p1 = oalloc(HDR); make_public(p1) - p2 = oalloc(HDR); make_public(p2) + p1 = oalloc(HDR + WORD); make_public(p1) + p2 = oalloc(HDR + WORD); make_public(p2) delegate(p1, p2) lib.stm_push_root(p1) major_collect() @@ -237,10 +237,10 @@ def test_new_version_kill_intermediate_non_root(): p1 = oalloc_refs(1); make_public(p1) - p2 = oalloc(HDR); make_public(p2) - p3 = oalloc(HDR); make_public(p3) - p4 = oalloc(HDR); make_public(p4) - p5 = oalloc(HDR); make_public(p5) + p2 = oalloc(HDR + WORD); make_public(p2) + p3 = oalloc(HDR + WORD); make_public(p3) + p4 = oalloc(HDR + WORD); make_public(p4) + p5 = oalloc(HDR + WORD); make_public(p5) delegate(p2, p3) delegate(p3, p4) delegate(p4, p5) @@ -295,9 +295,9 @@ check_prebuilt(p1) def test_prebuilt_version_2(): - p1 = lib.pseudoprebuilt(HDR, 42 + HDR) - p2 = oalloc(HDR); make_public(p2) - p3 = oalloc(HDR); make_public(p3) + p1 = lib.pseudoprebuilt(HDR + WORD, 42 + HDR + WORD) + p2 = oalloc(HDR + WORD); make_public(p2) + p3 = oalloc(HDR + WORD); make_public(p3) delegate(p1, p2) delegate(p2, p3) major_collect() @@ -306,9 +306,9 @@ check_free_old(p3) def test_prebuilt_with_hash(): - p1 = lib.pseudoprebuilt_with_hash(HDR, 42 + HDR, 99) - p2 = oalloc(HDR); make_public(p2) - p3 = oalloc(HDR); make_public(p3) + p1 = lib.pseudoprebuilt_with_hash(HDR + WORD, 42 + HDR + WORD, 99) + p2 = oalloc(HDR + WORD); make_public(p2) + p3 = oalloc(HDR + WORD); make_public(p3) delegate(p1, p2) delegate(p2, p3) major_collect() From noreply at buildbot.pypy.org Fri Jul 26 11:07:01 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 26 Jul 2013 11:07:01 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: big oops, fortunately caught by test Message-ID: <20130726090701.B59951C01E5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65680:d580b1cfca30 Date: 2013-07-26 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/d580b1cfca30/ Log: big oops, fortunately caught by test diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ b/rpython/rtyper/lltypesystem/rlist.py @@ -259,10 +259,12 @@ a realloc(). 
In the common case where we already overallocated enough, then this is a very fast operation. """ + cond = len(l.items) < newsize if jit.isconstant(len(l.items)) and jit.isconstant(newsize): - _ll_list_resize_hint_really(l, newsize, True) + if cond: + _ll_list_resize_hint_really(l, newsize, True) else: - jit.conditional_call(len(l.items) < newsize, + jit.conditional_call(cond, _ll_list_resize_hint_really, l, newsize, True) l.length = newsize @@ -273,7 +275,8 @@ """ cond = newsize < (len(l.items) >> 1) - 5 if jit.isconstant(len(l.items)) and jit.isconstant(newsize): - _ll_list_resize_hint_really(l, newsize, False) + if cond: + _ll_list_resize_hint_really(l, newsize, False) else: jit.conditional_call(cond, _ll_list_resize_hint_really, l, newsize, False) From noreply at buildbot.pypy.org Fri Jul 26 11:07:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 11:07:32 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Experimental: during major collection, find out private/protected Message-ID: <20130726090732.5BEDB1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r447:2cd9cc934675 Date: 2013-07-26 11:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/2cd9cc934675/ Log: Experimental: during major collection, find out private/protected objects that point to public objects with a most recent version that is also private/protected by the same thread. In this case we can replace the pointer with a direct pointer. diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -245,7 +245,7 @@ objsize = stmgc_size(obj); assert(objsize > sizeof(struct stm_stub_s) - WORD); } - dprintf(("copy %p over %p (%ld bytes)\n", obj, id_copy, objsize)); + dprintf(("copy %p over %p (%zd bytes)\n", obj, id_copy, objsize)); memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); /* copy the object's h_revision number */ @@ -258,8 +258,15 @@ return id_copy; } -static void visit_nonpublic(gcptr obj) +static void visit_nonpublic(gcptr obj, struct tx_public_descriptor *gcp) { + /* Visit a protected or private object. 'gcp' must be either NULL or + point to the thread that has got the object. This 'gcp' is only an + optimization: it lets us trace (most) private/protected objects + and replace pointers to public objects in them with pointers to + private/protected objects if they are the most recent ones, + provided they belong to the same thread. + */ assert(!(obj->h_tid & GCFLAG_PUBLIC)); assert(!(obj->h_tid & GCFLAG_STUB)); assert(!(obj->h_tid & GCFLAG_HAS_ID)); @@ -270,14 +277,15 @@ return; /* already visited */ obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); + gcptrlist_insert2(&objects_to_trace, obj, (gcptr)gcp); } -static gcptr visit_public(gcptr obj) +static gcptr visit_public(gcptr obj, struct tx_public_descriptor *gcp) { /* The goal is to walk to the most recent copy, then copy its content back into the h_original, and finally returns this - h_original. + h_original. Or, if gcp != NULL and the most recent copy is + protected by precisely 'gcp', then we return it instead. */ gcptr original; if (obj->h_original != 0 && @@ -335,6 +343,10 @@ The pair obj2/obj3 was or will be handled by mark_all_stack_roots(). */ assert(obj3->h_tid & GCFLAG_BACKUP_COPY); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; break; } } @@ -343,7 +355,11 @@ The head of the public chain is obj. We have to explicitly keep obj2 alive. 
*/ assert(!IS_POINTER(obj2->h_revision)); - visit_nonpublic(obj2); + visit_nonpublic(obj2, STUB_THREAD(obj)); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; break; } } @@ -355,15 +371,19 @@ /* return this original */ original->h_tid |= GCFLAG_VISITED; if (!(original->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, original); + gcptrlist_insert2(&objects_to_trace, original, NULL); return original; } -static void visit(gcptr *pobj) +static struct tx_public_descriptor *visit_protected_gcp; + +static void visit_take_protected(gcptr *pobj) { /* Visits '*pobj', marking it as surviving and possibly adding it to objects_to_trace. Fixes *pobj to point to the exact copy that - survived. + survived. This function will replace *pobj with a protected + copy if it belongs to the thread 'visit_protected_gcp', so the + latter must be initialized before any call! */ gcptr obj = *pobj; if (obj == NULL) @@ -371,25 +391,33 @@ if (!(obj->h_tid & GCFLAG_PUBLIC)) { /* 'obj' is a private or protected copy. */ - visit_nonpublic(obj); + visit_nonpublic(obj, visit_protected_gcp); } else { - *pobj = visit_public(obj); + *pobj = visit_public(obj, visit_protected_gcp); } } gcptr stmgcpage_visit(gcptr obj) { - visit(&obj); + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + visit_nonpublic(obj, NULL); + } + else { + obj = visit_public(obj, NULL); + } return obj; } static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { + visit_protected_gcp = + (struct tx_public_descriptor *)gcptrlist_pop(&objects_to_trace); gcptr obj = gcptrlist_pop(&objects_to_trace); - stmgc_trace(obj, &visit); + stmgc_trace(obj, &visit_take_protected); } + visit_protected_gcp = NULL; } static void mark_prebuilt_roots(void) @@ -407,7 +435,7 @@ obj->h_tid &= ~GCFLAG_VISITED; assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - obj2 = visit_public(obj); + obj2 = visit_public(obj, NULL); assert(obj2 == obj); /* it is its own original */ } } @@ -422,7 +450,7 @@ if (((revision_t)item) & ~((revision_t)END_MARKER_OFF | (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ - visit(root); + visit_take_protected(root); dprintf(("visit stack root: %p -> %p\n", item, *root)); } else if (item == END_MARKER_OFF) { @@ -440,13 +468,14 @@ for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); + visit_protected_gcp = d->public_descriptor; /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); /* the thread-local object */ - visit(d->thread_local_obj_ref); - visit(&d->old_thread_local_obj); + visit_take_protected(d->thread_local_obj_ref); + visit_take_protected(&d->old_thread_local_obj); /* the current transaction's private copies of public objects */ wlog_t *item; @@ -456,8 +485,10 @@ gcptr R = item->addr; gcptr L = item->val; - /* we visit the public object R */ - gcptr new_R = visit_public(R); + /* we visit the public object R. Must keep a public object + here, so we pass NULL as second argument. 
*/ + gcptr new_R = visit_public(R, NULL); + assert(new_R->h_tid & GCFLAG_PUBLIC); if (new_R != R) { /* we have to update the key in public_to_private, which @@ -471,7 +502,7 @@ should be private, possibly private_from_protected, so visit() should return the same private copy */ if (L != NULL) { - visit_nonpublic(L); + visit_nonpublic(L, visit_protected_gcp); } } G2L_LOOP_END; @@ -489,8 +520,13 @@ for (i = d->private_from_protected.size - 1; i >= 0; i--) { gcptr obj = items[i]; assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - visit_nonpublic(obj); - visit((gcptr *)&obj->h_revision); + visit_nonpublic(obj, visit_protected_gcp); + + gcptr backup_obj = (gcptr)obj->h_revision; + if (!(backup_obj->h_tid & GCFLAG_PUBLIC)) + visit_nonpublic(backup_obj, visit_protected_gcp); + else + obj->h_revision = (revision_t)visit_public(backup_obj, NULL); } /* make sure that the other lists are empty */ @@ -509,6 +545,7 @@ d->num_private_from_protected_known_old); } + visit_protected_gcp = NULL; gcptrlist_delete(&new_public_to_private); } @@ -516,7 +553,7 @@ { long i; gcptr *items; - assert(d->old_objects_to_trace.size == 0); + assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do * more here. From noreply at buildbot.pypy.org Fri Jul 26 11:09:47 2013 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 26 Jul 2013 11:09:47 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix one test Message-ID: <20130726090947.793F21C01E5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65681:4ff402573fd6 Date: 2013-07-26 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/4ff402573fd6/ Log: fix one test diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -94,7 +94,7 @@ if op.args[0].value._obj._name == 'jit_force_virtual': return 'residual' return 'builtin' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return FakeDescr() def calldescr_canraise(self, calldescr): return False From noreply at buildbot.pypy.org Fri Jul 26 11:20:55 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 26 Jul 2013 11:20:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Add fastpaths for read/write barriers for x64. The asm is still seriously non-optimal in the fastpath Message-ID: <20130726092055.2EBEA1C0219@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65682:f819be0d01ca Date: 2013-07-26 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/f819be0d01ca/ Log: Add fastpaths for read/write barriers for x64. 
The asm is still seriously non-optimal in the fastpath diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2182,12 +2182,19 @@ assert isinstance(result_loc, RegLoc) mc.POP_r(result_loc.value) - def _get_private_rev_num_addr(self): + def _get_stm_private_rev_num_addr(self): assert self.cpu.gc_ll_descr.stm rn = rstm.get_adr_of_private_rev_num() rn = rn - stmtlocal.threadlocal_base() assert rx86.fits_in_32bits(rn) return rn + + def _get_stm_read_barrier_cache_addr(self): + assert self.cpu.gc_ll_descr.stm + rbc = rstm.get_adr_of_read_barrier_cache() + rbc = rbc - stmtlocal.threadlocal_base() + assert rx86.fits_in_32bits(rbc) + return rbc def _stm_barrier_fastpath(self, mc, descr, arglocs, is_frame=False, align_stack=False): @@ -2207,18 +2214,82 @@ # # FASTPATH: # - rn = self._get_private_rev_num_addr() + # write_barrier: + # (obj->h_revision != stm_private_rev_num) + # || (obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) + # read_barrier: + # (obj->h_revision != stm_private_rev_num) + # && (FXCACHE_AT(obj) != obj))) + assert not IS_X86_32 # XXX: todo + jz_location = 0 + jz_location2 = 0 + jnz_location = 0 + # compare h_revision with stm_private_rev_num (XXX: may be slow) + rn = self._get_stm_private_rev_num_addr() + stmtlocal.tl_segment_prefix(mc) + mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) + if loc_base == ebp: + mc.CMP_rb(X86_64_SCRATCH_REG.value, StmGC.H_REVISION) + else: + mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) + if isinstance(descr, STMReadBarrierDescr): - # (obj->h_revision != stm_private_rev_num) - # && (FXCACHE_AT(obj) != obj))) - stmtlocal.tl_segment_prefix(mc) - #mc.CMP_jr(rn, loc_base.value) - mc.MOV_rj(X86_64_SCRATCH_REG.value, rn) - mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) + # jump to end if h_rev==priv_rev mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location = mc.get_relative_pos() - else: - jz_location = 0 + else: # write_barrier + # jump to slowpath if h_rev!=priv_rev + mc.J_il8(rx86.Conditions['NZ'], 0) # patched below + jnz_location = mc.get_relative_pos() + + if isinstance(descr, STMReadBarrierDescr): + # FXCACHE_AT(obj) != obj + # XXX: optimize... 
+ temp = loc_base.find_unused_reg() + mc.PUSH_r(temp.value) + mc.MOV_rr(temp.value, loc_base.value) + mc.AND_ri(temp.value, StmGC.FX_MASK) + + # XXX: addressings like [rdx+rax*1] don't seem to work + rbc = self._get_stm_read_barrier_cache_addr() + stmtlocal.tl_segment_prefix(mc) + mc.MOV_rj(X86_64_SCRATCH_REG.value, rbc) + mc.ADD_rr(X86_64_SCRATCH_REG.value, temp.value) + mc.CMP(loc_base, mem(X86_64_SCRATCH_REG, 0)) + mc.POP_r(temp.value) + mc.J_il8(rx86.Conditions['Z'], 0) # patched below + jz_location2 = mc.get_relative_pos() + # : mov rdx,0xffffffffffffffb0 + # : movzx eax,di + # : mov rdx,QWORD PTR fs:[rdx] + # : cmp rdi,QWORD PTR [rdx+rax*1] + # : je 0x401f61 + # : jmp 0x6a59f0 + + if isinstance(descr, STMWriteBarrierDescr): + # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 + if loc_base == ebp: + #mc.MOV_rb(X86_64_SCRATCH_REG.value, StmGC.H_TID) + mc.TEST8_bi(StmGC.H_TID, StmGC.GCFLAG_WRITE_BARRIER) + else: + # mc.MOV(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_TID)) + mc.TEST8_mi((loc_base.value, StmGC.H_TID), + StmGC.GCFLAG_WRITE_BARRIER) + #doesn't work: + # mc.TEST(X86_64_SCRATCH_REG, imm(StmGC.GCFLAG_WRITE_BARRIER)) + mc.J_il8(rx86.Conditions['NZ'], 0) # patched below + jnz_location2 = mc.get_relative_pos() + + # jump to end + mc.JMP_l8(0) # patched below + jz_location = mc.get_relative_pos() + + # jump target slowpath: + offset = mc.get_relative_pos() - jnz_location + offset2 = mc.get_relative_pos() - jnz_location2 + assert 0 < offset <= 127 + mc.overwrite(jnz_location - 1, chr(offset)) + mc.overwrite(jnz_location2 - 1, chr(offset2)) # # SLOWPATH_START # @@ -2243,10 +2314,14 @@ # # SLOWPATH_END # + # jump target end: + offset = mc.get_relative_pos() - jz_location + assert 0 < offset <= 127 + mc.overwrite(jz_location - 1, chr(offset)) if isinstance(descr, STMReadBarrierDescr): - offset = mc.get_relative_pos() - jz_location + offset = mc.get_relative_pos() - jz_location2 assert 0 < offset <= 127 - mc.overwrite(jz_location - 1, chr(offset)) + mc.overwrite(jz_location2 - 1, chr(offset)) diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -12,23 +12,7 @@ WORD = LONG_BIT // 8 NULL = llmemory.NULL - -# keep in sync with stmgc.h & et.h: first_gcflag = 1 << (LONG_BIT//2) -GCFLAG_OLD = first_gcflag << 0 -GCFLAG_VISITED = first_gcflag << 1 -GCFLAG_PUBLIC = first_gcflag << 2 -GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 -GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 -GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h -GCFLAG_NURSERY_MOVED = first_gcflag << 6 -GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug -GCFLAG_STUB = first_gcflag << 8 # debug -GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 -GCFLAG_HAS_ID = first_gcflag << 10 - -PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 8) -PREBUILT_REVISION = r_uint(1) class StmGC(MovingGCBase): @@ -53,6 +37,27 @@ TRANSLATION_PARAMS = { } + # keep in sync with stmgc.h & et.h: + GCFLAG_OLD = first_gcflag << 0 + GCFLAG_VISITED = first_gcflag << 1 + GCFLAG_PUBLIC = first_gcflag << 2 + GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 + GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 + GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h + GCFLAG_NURSERY_MOVED = first_gcflag << 6 + GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug + GCFLAG_STUB = first_gcflag << 8 # debug + GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 + GCFLAG_HAS_ID = first_gcflag << 10 + GCFLAG_IMMUTABLE = first_gcflag << 11; + GCFLAG_SMALLSTUB = first_gcflag << 12; + + PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 
8) + PREBUILT_REVISION = r_uint(1) + + FX_MASK = 65535 + + def setup(self): # Hack: MovingGCBase.setup() sets up stuff related to id(), which # we implement differently anyway. So directly call GCBase.setup(). @@ -75,7 +80,7 @@ def get_original_copy(self, obj): addr = llmemory.cast_ptr_to_adr(obj) - if bool(self.get_hdr_tid(addr)[0] & GCFLAG_PREBUILT_ORIGINAL): + if bool(self.get_hdr_tid(addr)[0] & self.GCFLAG_PREBUILT_ORIGINAL): return obj # orig = self.get_hdr_original(addr)[0] @@ -125,7 +130,7 @@ """Means the reference will stay valid, except if not seen by the GC, then it can get collected.""" tid = self.get_hdr_tid(obj)[0] - if bool(tid & GCFLAG_OLD): + if bool(tid & self.GCFLAG_OLD): return False return True diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -7,6 +7,10 @@ addr = llop.stm_get_adr_of_private_rev_num(llmemory.Address) return rffi.cast(lltype.Signed, addr) +def get_adr_of_read_barrier_cache(): + addr = llop.stm_get_adr_of_read_barrier_cache(llmemory.Address) + return rffi.cast(lltype.Signed, addr) + def become_inevitable(): llop.stm_become_inevitable(lltype.Void) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -457,6 +457,7 @@ 'stm_inspect_abort_info': LLOp(sideeffects=False), 'stm_get_adr_of_private_rev_num':LLOp(), + 'stm_get_adr_of_read_barrier_cache':LLOp(), # __________ address operations __________ diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -593,6 +593,7 @@ OP_STM_POP_ROOT_INTO = _OP_STM OP_STM_GET_ROOT_STACK_TOP = _OP_STM OP_STM_GET_ADR_OF_PRIVATE_REV_NUM = _OP_STM + OP_STM_GET_ADR_OF_READ_BARRIER_CACHE= _OP_STM OP_STM_ALLOCATE = _OP_STM OP_STM_WEAKREF_ALLOCATE = _OP_STM OP_STM_GET_TID = _OP_STM diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -103,6 +103,11 @@ result = funcgen.expr(op.result) return '%s = (%s)&stm_private_rev_num;' % ( result, cdecl(funcgen.lltypename(op.result), '')) + +def stm_get_adr_of_read_barrier_cache(funcgen, op): + result = funcgen.expr(op.result) + return '%s = (%s)&stm_read_barrier_cache;' % ( + result, cdecl(funcgen.lltypename(op.result), '')) def stm_weakref_allocate(funcgen, op): From noreply at buildbot.pypy.org Fri Jul 26 12:16:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 12:16:17 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Add 'CMP_ra' to rx86.py. Message-ID: <20130726101617.96C8F1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65683:439d4b70ba5e Date: 2013-07-26 12:15 +0200 http://bitbucket.org/pypy/pypy/changeset/439d4b70ba5e/ Log: Add 'CMP_ra' to rx86.py. 
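In the rx86 naming scheme the 'ra' suffix means: first operand a register, second operand a memory location of the form [base + index*scale + offset], so CMP_ra lets the backend emit compares like "cmp rdi, [rdx+rax*1]" as wanted by the read-barrier fast path above. A rough usage sketch follows; the (base, index, scale-shift, offset) tuple layout and the register choices are assumptions for illustration, not taken from the changeset:

    # assumed operand layout, mirroring the other *_ra methods:
    #   (base register, index register, scale shift, constant offset)
    mc.CMP_ra(edi.value, (edx.value, eax.value, 0, 0))   # CMP rdi, [rdx+rax*1+0]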
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -372,6 +372,8 @@ INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) + INSN_ra = insn(rex_w, chr(base+3), register(1,8), + mem_reg_plus_scaled_reg_plus_const(2)) INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_(2)) INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_(1), immediate(2,'b')) INSN_mi8 = insn(rex_w, '\x83', orbyte(base), mem_reg_plus_const(1), @@ -393,8 +395,8 @@ INSN_bi32(mc, offset, immed) INSN_bi._always_inline_ = True # try to constant-fold single_byte() - return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, - INSN_ji8, INSN_mi8) + return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_ra, + INSN_rj, INSN_ji8, INSN_mi8) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -470,13 +472,15 @@ # ------------------------------ Arithmetic ------------------------------ - ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) - OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1) - AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) - SUB_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8 = common_modes(5) - SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_ = common_modes(3) - XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_ = common_modes(6) - CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_ = common_modes(7) + ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,_,ADD_rj,_,_ = common_modes(0) + OR_ri, OR_rr, OR_rb, _,_,OR_rm, _,OR_rj, _,_ = common_modes(1) + AND_ri,AND_rr,AND_rb,_,_,AND_rm,_,AND_rj,_,_ = common_modes(4) + SUB_ri,SUB_rr,SUB_rb,_,_,SUB_rm,_,SUB_rj,SUB_ji8,SUB_mi8 = ( + common_modes(5)) + SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,_,SBB_rj,_,_ = common_modes(3) + XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,_,XOR_rj,_,_ = common_modes(6) + CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_ra,CMP_rj,_,_ = ( + common_modes(7)) CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b')) CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2)) From noreply at buildbot.pypy.org Fri Jul 26 12:32:33 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 26 Jul 2013 12:32:33 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: merge default into branch Message-ID: <20130726103233.761D01C01E5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-subtype Changeset: r65684:f1b5f614487d Date: 2013-07-26 13:21 +0300 http://bitbucket.org/pypy/pypy/changeset/f1b5f614487d/ Log: merge default into branch diff too long, truncating to 2000 out of 3276 lines diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -75,6 
+76,15 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +Highlights +========== + +* Support for os.statvfs and os.fstatvfs on unix systems. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. 
Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -24,3 +24,22 @@ .. branch: distutils-cppldflags Copy CPython's implementation of customize_compiler, dont call split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. + +.. branch: ssl_moving_write_buffer + +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. 
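The pythoninspect-fix entry above is easiest to see with a small usage sketch. The snippet below assumes it runs on a PyPy that already ships the __pypy__.os.real_getenv helper added by this patch; it mirrors the behaviour exercised by the new test_os.py test further down.

    import os
    import __pypy__.os

    os.putenv("PYTHONINSPECT", "1")   # mutates the process environment only

    # os.getenv() consults os.environ, which was captured at interpreter
    # startup, so it does not see the putenv() above:
    print(os.getenv("PYTHONINSPECT"))                # -> None

    # real_getenv() bypasses that cache and asks the OS directly, which is
    # what app_main.py now does before deciding whether to start the
    # interactive prompt once the script finishes:
    print(__pypy__.os.real_getenv("PYTHONINSPECT"))  # -> '1'

Calling os.unsetenv afterwards makes real_getenv return None again, as the test checks.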
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } @@ -550,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. # + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script = getscript(""" @@ -706,6 +706,20 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +985,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. 
plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,27 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +88,8 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,39 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,104 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize + N = 2 ** 128 + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize + + def 
test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) == sys.maxsize + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) + + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) == 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxsize + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from pypy.interpreter.error 
import operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") @@ -2760,6 +2832,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = 
newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -324,8 +324,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -140,6 +140,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -219,8 +224,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError, e: print e - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: raise AssertionError("did not fail??") diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. 
+ libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,20 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' : 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -53,44 +55,46 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 
'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -167,9 +171,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -177,7 +181,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -186,7 +190,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -194,7 +198,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -65,6 +65,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. 
So we call fstat(), letting diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,16 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,6 +125,9 @@ self.llbox = llbox def descr_getint(self, space): + if not jit_hooks.box_isint(self.llbox): + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) @@ -182,7 +185,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +240,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +351,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num + assert int_add.offset == 0 
self.on_compile_bridge() expected = ('>' % repr(self.f.func_code)) @@ -160,6 +164,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/support.py @@ -0,0 +1,33 @@ +import py + +from pypy.conftest import option +from pypy.interpreter.error import OperationError + +def import_lib_pypy(space, name, skipmsg=None): + """Import a top level module ensuring it's sourced from the lib_pypy + package. + + Raises a pytest Skip on ImportError if a skip message was specified. + """ + if option.runappdirect: + try: + mod = __import__('lib_pypy.' + name) + except ImportError as e: + if skipmsg is not None: + py.test.skip('%s (%s))' % (skipmsg, str(e))) + raise + return getattr(mod, name) + + try: + # app-level import should find it from the right place (we + # assert so afterwards) as long as a builtin module doesn't + # overshadow it + failed = ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" % name) + importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + "return %s" % (name, name, failed, name)) + return space.appexec([], importline) + except OperationError as e: + if skipmsg is None or not e.match(space, space.w_ImportError): + raise + py.test.skip('%s (%s))' % (skipmsg, str(e))) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -2,44 +2,51 @@ Extra tests for the pure Python PyPy _collections module (not used in normal PyPy's) """ +from pypy.module.test_lib_pypy.support import import_lib_pypy -from __future__ import absolute_import -from lib_pypy import _collections as collections -import py -class TestDeque: - def setup_method(self, method): - self.n = 10 - self.d = collections.deque(range(self.n)) +class AppTestDeque: + + def setup_class(cls): + space = cls.space + cls.w_collections = import_lib_pypy(space, '_collections') + cls.w_n = space.wrap(10) + + def w_get_deque(self): + return self.collections.deque(range(self.n)) def test_deque(self): - assert len(self.d) == self.n + d = self.get_deque() + assert len(d) == self.n for i in range(self.n): - assert i == self.d[i] + assert i == d[i] for i in range(self.n-1, -1, -1): - assert self.d.pop() == i - assert len(self.d) == 0 + assert d.pop() == i + assert len(d) == 0 def test_deque_iter(self): - it = iter(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = iter(d) + raises(TypeError, len, it) assert it.next() == 0 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def test_deque_reversed(self): - it = reversed(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = reversed(d) + raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def 
test_deque_remove(self): - d = self.d - py.test.raises(ValueError, d.remove, "foobar") + d = self.get_deque() + raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): + collections = self.collections # Handle evil mutator class MutateCmp: def __init__(self, deque, result): @@ -52,24 +59,33 @@ for match in (True, False): d = collections.deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - py.test.raises(IndexError, d.remove, 'c') + raises(IndexError, d.remove, 'c') assert len(d) == 0 -class TestDequeExtra: +class AppTestDequeExtra: + + spaceconfig = dict(usemodules=('binascii', 'struct',)) + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_remove_empty(self): + collections = self.collections d = collections.deque([]) - py.test.raises(ValueError, d.remove, 1) + raises(ValueError, d.remove, 1) def test_remove_mutating(self): + collections = self.collections class MutatingCmp(object): def __eq__(self, other): d.clear() return True d = collections.deque([MutatingCmp()]) - py.test.raises(IndexError, d.remove, 1) + raises(IndexError, d.remove, 1) def test_remove_failing(self): + collections = self.collections class FailingCmp(object): def __eq__(self, other): assert False @@ -77,10 +93,11 @@ f = FailingCmp() d = collections.deque([1, 2, 3, f, 4, 5]) d.remove(3) - py.test.raises(AssertionError, d.remove, 4) + raises(AssertionError, d.remove, 4) assert d == collections.deque([1, 2, f, 4, 5]) def test_maxlen(self): + collections = self.collections d = collections.deque([], 3) d.append(1); d.append(2); d.append(3); d.append(4) assert list(d) == [2, 3, 4] @@ -95,11 +112,13 @@ assert repr(d3) == "deque([2, 3, 4], maxlen=3)" def test_count(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) assert d.count(2) == 3 assert d.count(4) == 0 def test_reverse(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) d.reverse() assert list(d) == [2, 3, 2, 2, 1] @@ -109,6 +128,7 @@ assert list(d) == range(99, -1, -1) def test_subclass_with_kwargs(self): + collections = self.collections class SubclassWithKwargs(collections.deque): def __init__(self, newarg=1): collections.deque.__init__(self) @@ -116,11 +136,13 @@ # SF bug #1486663 -- this used to erroneously raise a TypeError SubclassWithKwargs(newarg=1) -def foobar(): - return list +class AppTestDefaultDict: -class TestDefaultDict: + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_basic(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory is None d1.default_factory = list @@ -148,20 +170,23 @@ assert 12 not in d2.keys() d2.default_factory = None assert d2.default_factory == None - py.test.raises(KeyError, d2.__getitem__, 15) - py.test.raises(TypeError, collections.defaultdict, 1) + raises(KeyError, d2.__getitem__, 15) + raises(TypeError, collections.defaultdict, 1) def test_constructor(self): + collections = self.collections assert collections.defaultdict(None) == {} assert collections.defaultdict(None, {1: 2}) == {1: 2} def test_missing(self): + collections = self.collections d1 = collections.defaultdict() - py.test.raises(KeyError, d1.__missing__, 42) + raises(KeyError, d1.__missing__, 42) d1.default_factory = list assert d1.__missing__(42) == [] def test_repr(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory == None assert repr(d1) == "defaultdict(None, {})" @@ -181,6 +206,7 @@ assert repr(d4) == 
"defaultdict(%s, {14: defaultdict(None, {})})" % repr(int) def test_recursive_repr(self): + collections = self.collections # Issue2045: stack overflow when default_factory is a bound method class sub(collections.defaultdict): def __init__(self): @@ -192,6 +218,7 @@ "defaultdict( Author: Armin Rigo Branch: copy-over-original2 Changeset: r448:d831591aa66f Date: 2013-07-26 11:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/d831591aa66f/ Log: fix tests diff --git a/c4/test/test_abort.py b/c4/test/test_abort.py --- a/c4/test/test_abort.py +++ b/c4/test/test_abort.py @@ -37,7 +37,7 @@ assert seen == range(5000) def test_global_to_local_copies(): - p1 = palloc(HDR) + p1 = palloc(HDR + WORD) # @perform_transaction def run(retry_counter): diff --git a/c4/test/test_atomic.py b/c4/test/test_atomic.py --- a/c4/test/test_atomic.py +++ b/c4/test/test_atomic.py @@ -20,7 +20,7 @@ def test_set_transaction_length(): lib.stm_set_transaction_length(5) # breaks after 4 read-or-writes - plist = [palloc(HDR) for i in range(6)] + plist = [palloc(HDR + WORD) for i in range(6)] should_br = ['?'] * (len(plist) + 1) # @perform_transaction diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -45,7 +45,7 @@ assert classify(p) == "protected" def test_private_with_backup(): - p = nalloc(HDR) + p = nalloc(HDR + WORD) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() r2 = lib.get_private_rev_num() @@ -88,7 +88,7 @@ assert lib.stm_id(p) != 0 def test_prebuilt_object_to_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) flags = p.h_tid assert (flags & GCFLAG_PUBLIC_TO_PRIVATE) == 0 pid = lib.stm_id(p) @@ -158,7 +158,7 @@ assert p1.h_revision == int(ffi.cast("revision_t", p3)) # shortcutted def test_read_barrier_public_to_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) pid = lib.stm_id(p) p2 = lib.stm_write_barrier(p) assert p2 != p @@ -173,7 +173,7 @@ assert pid == lib.stm_id(p2) def test_read_barrier_handle_protected(): - p = palloc(HDR) + p = palloc(HDR + WORD) p2 = lib.stm_write_barrier(p) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -188,7 +188,7 @@ assert list_of_read_objects() == [p2] def test_read_barrier_handle_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) p2 = lib.stm_write_barrier(p) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -302,14 +302,14 @@ assert porig == ffi.NULL # } - p1 = oalloc(HDR) + p1 = oalloc(HDR + WORD) p1id = lib.stm_id(p1) p1r = lib.stm_read_barrier(p1) assert lib.stm_id(p1r) == p1id p1w = lib.stm_write_barrier(p1) assert lib.stm_id(p1w) == p1id - p2 = oalloc(HDR) + p2 = oalloc(HDR + WORD) p2w = lib.stm_write_barrier(p2) p2id = lib.stm_id(p2w) assert p2id == lib.stm_id(p2) @@ -581,8 +581,8 @@ pid = [] rid = [] def f1(r): - r1 = nalloc(HDR) - q1 = nalloc(HDR) + r1 = nalloc(HDR + WORD) + q1 = nalloc(HDR + WORD) p1 = lib.stm_write_barrier(p) # private copy plist.append(p1) qlist.append(q1) @@ -669,7 +669,7 @@ def test_prehash_simple(): - p = palloc(HDR, 99) + p = palloc(HDR + WORD, 99) assert lib.stm_hash(p) == 99 assert lib.stm_id(p) != lib.stm_hash(p) pr = lib.stm_read_barrier(p) @@ -685,7 +685,7 @@ def test_prehash_with_stealing(): - p = palloc(HDR, 99) + p = palloc(HDR + WORD, 99) def f1(r): lib.stm_write_barrier(p) # private copy lib.stm_commit_transaction() From noreply at buildbot.pypy.org Fri Jul 26 12:37:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 12:37:33 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: 
Make two flags: VISITED vs MARKED. See doc in et.h. Message-ID: <20130726103733.5D8F21C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r449:e35685ae86d0 Date: 2013-07-26 12:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/e35685ae86d0/ Log: Make two flags: VISITED vs MARKED. See doc in et.h. diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -25,7 +25,11 @@ * * GCFLAG_OLD is set on old objects. * - * GCFLAG_VISITED is used temporarily during major collections. + * GCFLAG_VISITED and GCFLAG_MARKED are used temporarily during major + * collections. The objects are MARKED|VISITED as soon as they have been + * added to 'objects_to_trace', and so will be or have been traced. The + * objects are only MARKED if their memory must be kept alive, but (so far) + * we found that tracing them is not useful. * * GCFLAG_PUBLIC is set on public objects. * @@ -74,10 +78,12 @@ static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; static const revision_t GCFLAG_SMALLSTUB = STM_FIRST_GCFLAG << 12; +static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13; /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ + GCFLAG_MARKED | \ GCFLAG_PREBUILT_ORIGINAL | \ GCFLAG_OLD | \ GCFLAG_PUBLIC) @@ -88,12 +94,14 @@ "PREBUILT_ORIGINAL", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ - "MOVED", \ + "MOVED", \ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ - "HAS_ID", \ - "IMMUTABLE", \ + "HAS_ID", \ + "IMMUTABLE", \ + "SMALLSTUB", \ + "MARKED", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -276,8 +276,12 @@ if (obj->h_tid & GCFLAG_VISITED) return; /* already visited */ - obj->h_tid |= GCFLAG_VISITED; + obj->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; gcptrlist_insert2(&objects_to_trace, obj, (gcptr)gcp); + + obj = (gcptr)obj->h_original; + if (obj != NULL) + obj->h_tid |= GCFLAG_MARKED; } static gcptr visit_public(gcptr obj, struct tx_public_descriptor *gcp) @@ -369,7 +373,7 @@ copy_over_original(obj, original); /* return this original */ - original->h_tid |= GCFLAG_VISITED; + original->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; if (!(original->h_tid & GCFLAG_STUB)) gcptrlist_insert2(&objects_to_trace, original, NULL); return original; @@ -426,13 +430,15 @@ contains all the ones that have been modified. Because they are themselves not in any page managed by this file, their GCFLAG_VISITED is not removed at the end of the current - collection. That's why we remove it here. */ + collection. That's why we remove it here. GCFLAG_MARKED is not + relevant for prebuilt objects, but we avoid objects with MARKED + but not VISITED, which trigger some asserts. */ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; gcptr obj, obj2; for (; pobj != pend; pobj++) { obj = *pobj; - obj->h_tid &= ~GCFLAG_VISITED; + obj->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); obj2 = visit_public(obj, NULL); @@ -680,7 +686,9 @@ and the flag is removed; other locations are marked as free. 
*/ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) break; /* first object that stays alive */ p = (gcptr)(((char *)p) + obj_size); } @@ -690,8 +698,10 @@ surviving_pages = lpage; p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); mc_total_in_use += obj_size; } else { @@ -717,6 +727,7 @@ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { assert(!(p->h_tid & GCFLAG_VISITED)); + assert(!(p->h_tid & GCFLAG_MARKED)); if (p->h_tid != DEBUG_WORD(0xDD)) { dprintf(("| freeing %p (with page %p)\n", p, lpage)); } @@ -746,8 +757,10 @@ G2L_LOOP_FORWARD(gcp->nonsmall_objects, item) { gcptr p = item->addr; - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); } else { G2L_LOOP_DELETE(item); diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -23,7 +23,8 @@ #define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */ #define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID)) #define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1) -#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8)) +#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * ((1<<0) | (1<<1) | \ + (1<<2) | (1<<3) | (1<<13))) #define PREBUILT_REVISION 1 diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -104,7 +104,7 @@ assert ffi.string(c).endswith("ei424242ee") def test_pointer_equal(): - p = palloc(HDR) + p = palloc(HDR + WORD) assert lib.stm_pointer_equal(p, p) assert not lib.stm_pointer_equal(p, ffi.NULL) assert not lib.stm_pointer_equal(ffi.NULL, p) diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -511,3 +511,24 @@ # major_collect() check_free_old(p1) + +def test_keep_original_alive(): + p2 = oalloc(HDR + WORD); make_public(p2) + p2b = lib.stm_write_barrier(p2) + lib.stm_push_root(p2) + minor_collect() + p2 = lib.stm_pop_root() + p2b = lib.stm_write_barrier(p2) + assert not lib.in_nursery(p2) + assert not lib.in_nursery(p2b) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p2) == "public" + assert classify(p2b) == "protected" + assert ffi.cast("gcptr", p2b.h_original) == p2 + lib.stm_push_root(p2b) + major_collect() + p2c = lib.stm_pop_root() + check_not_free(ffi.cast("gcptr", p2c.h_original)) + assert p2c == p2b + assert ffi.cast("gcptr", p2c.h_original) == p2 diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -279,7 +279,7 @@ assert classify(p2) == "private" def test_new_version(): - p1 = oalloc(HDR) + p1 = oalloc(HDR + WORD) assert lib.stm_write_barrier(p1) == p1 lib.stm_push_root(p1) transaction_break() @@ -300,7 +300,7 @@ assert classify(p2) == "private_from_protected" def test_prebuilt_version(): - p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p1 = lib.pseudoprebuilt(HDR + WORD, 42 + HDR + WORD) p2 = lib.stm_write_barrier(p1) assert p2 != p1 check_prebuilt(p1) diff --git a/c4/weakref.c b/c4/weakref.c --- 
a/c4/weakref.c +++ b/c4/weakref.c @@ -68,49 +68,25 @@ static _Bool is_partially_visited(gcptr obj) { - /* Based on gcpage.c:visit(). Check the code here if we simplify - visit(). Returns True or False depending on whether we find any + /* Based on gcpage.c:visit_public(). Check the code here if we change + visit_public(). Returns True or False depending on whether we find any version of 'obj' to be VISITED or not. */ - restart: + assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, + obj->h_tid & GCFLAG_MARKED)); if (obj->h_tid & GCFLAG_VISITED) return 1; - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); + if (!(obj->h_tid & GCFLAG_PUBLIC)) return 0; - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - } - goto restart; - } - else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - if (B->h_tid & GCFLAG_VISITED) + + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + gcptr original = (gcptr)obj->h_original; + assert(IMPLIES(original->h_tid & GCFLAG_VISITED, + original->h_tid & GCFLAG_MARKED)); + if (original->h_tid & GCFLAG_MARKED) return 1; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - obj = (gcptr)B->h_revision; - goto restart; - } } return 0; } From noreply at buildbot.pypy.org Fri Jul 26 12:37:34 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 12:37:34 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix test Message-ID: <20130726103734.7AA351C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r450:e6aa22bd6c25 Date: 2013-07-26 12:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/e6aa22bd6c25/ Log: Fix test diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -77,8 +77,9 @@ static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; -static const revision_t GCFLAG_SMALLSTUB = STM_FIRST_GCFLAG << 12; +static const revision_t GCFLAG_SMALLSTUB /*debug*/ = STM_FIRST_GCFLAG << 12; static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13; +/* warning, the last flag available is "<< 15" on 32-bit */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ diff --git a/c4/test/test_weakref.py b/c4/test/test_weakref.py --- a/c4/test/test_weakref.py +++ b/c4/test/test_weakref.py @@ -109,12 +109,14 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() # + assert lib.rawgetptr(p1, 0) == p2 assert lib.rawgetlong(p2, 0) == 0 lib.stm_push_root(p1) lib.stm_push_root(p2) major_collect() p2b = lib.stm_pop_root() p1 = lib.stm_pop_root() - assert lib.rawgetptr(p1, 0) == p2b + assert lib.rawgetptr(p1, 0) == p2 assert p2b != p2 assert lib.getlong(p2b, 0) == 912809218 + assert lib.getlong(p2, 0) == 912809218 diff --git a/c4/weakref.c b/c4/weakref.c --- a/c4/weakref.c +++ b/c4/weakref.c @@ -70,11 +70,11 @@ { /* Based on 
gcpage.c:visit_public(). Check the code here if we change visit_public(). Returns True or False depending on whether we find any - version of 'obj' to be VISITED or not. + version of 'obj' to be MARKED or not. */ assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, obj->h_tid & GCFLAG_MARKED)); - if (obj->h_tid & GCFLAG_VISITED) + if (obj->h_tid & GCFLAG_MARKED) return 1; if (!(obj->h_tid & GCFLAG_PUBLIC)) From noreply at buildbot.pypy.org Fri Jul 26 13:46:27 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 26 Jul 2013 13:46:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: improve asm of fastpath in stm barriers Message-ID: <20130726114627.C72CF1C002A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65685:fd02ae6aa5ba Date: 2013-07-26 13:45 +0200 http://bitbucket.org/pypy/pypy/changeset/fd02ae6aa5ba/ Log: improve asm of fastpath in stm barriers diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -441,14 +441,14 @@ def __init__(self, gc_ll_descr, stmcat): assert stmcat == 'P2R' STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_read_barrier') + 'stm_DirectReadBarrier') class STMWriteBarrierDescr(STMBarrierDescr): def __init__(self, gc_ll_descr, stmcat): assert stmcat in ['P2W'] STMBarrierDescr.__init__(self, gc_ll_descr, stmcat, - 'stm_write_barrier') + 'stm_WriteBarrier') class GcLLDescr_framework(GcLLDescription): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2204,6 +2204,7 @@ assert isinstance(descr, STMBarrierDescr) assert descr.returns_modified_object loc_base = arglocs[0] + temp_loc = arglocs[1] assert isinstance(loc_base, RegLoc) helper_num = 0 @@ -2242,32 +2243,22 @@ mc.J_il8(rx86.Conditions['NZ'], 0) # patched below jnz_location = mc.get_relative_pos() + # FXCACHE_AT(obj) != obj if isinstance(descr, STMReadBarrierDescr): - # FXCACHE_AT(obj) != obj - # XXX: optimize... 
- temp = loc_base.find_unused_reg() - mc.PUSH_r(temp.value) - mc.MOV_rr(temp.value, loc_base.value) - mc.AND_ri(temp.value, StmGC.FX_MASK) - - # XXX: addressings like [rdx+rax*1] don't seem to work + # calculate: temp = obj & FX_MASK + assert StmGC.FX_MASK == 65535 + mc.MOVZX16(temp_loc, loc_base) + # calculate: rbc + temp == obj rbc = self._get_stm_read_barrier_cache_addr() stmtlocal.tl_segment_prefix(mc) mc.MOV_rj(X86_64_SCRATCH_REG.value, rbc) - mc.ADD_rr(X86_64_SCRATCH_REG.value, temp.value) - mc.CMP(loc_base, mem(X86_64_SCRATCH_REG, 0)) - mc.POP_r(temp.value) + mc.CMP_ra(loc_base.value, + (X86_64_SCRATCH_REG.value, temp_loc.value, 0, 0)) mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location2 = mc.get_relative_pos() - # : mov rdx,0xffffffffffffffb0 - # : movzx eax,di - # : mov rdx,QWORD PTR fs:[rdx] - # : cmp rdi,QWORD PTR [rdx+rax*1] - # : je 0x401f61 - # : jmp 0x6a59f0 - + + # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 if isinstance(descr, STMWriteBarrierDescr): - # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 if loc_base == ebp: #mc.MOV_rb(X86_64_SCRATCH_REG.value, StmGC.H_TID) mc.TEST8_bi(StmGC.H_TID, StmGC.GCFLAG_WRITE_BARRIER) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -797,8 +797,25 @@ for i in range(N)] self.perform_discard(op, arglocs) + def consider_cond_call_stm_b(self, op): + assert op.result is None + args = op.getarglist() + N = len(args) + assert N == 1 + # we force all arguments in a reg (unless they are Consts), + # because it will be needed anyway by the following setfield_gc + # or setarrayitem_gc. It avoids loading it twice from the memory. + tmp_box = TempBox() + tmp_loc = self.rm.force_allocate_reg(tmp_box, args) + args = args + [tmp_box] + + arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args) + for i in range(N)] + [tmp_loc] + + self.perform_discard(op, arglocs) + self.rm.possibly_free_var(tmp_box) + consider_cond_call_gc_wb_array = consider_cond_call_gc_wb - consider_cond_call_stm_b = consider_cond_call_gc_wb def consider_call_malloc_nursery(self, op): gc_ll_descr = self.assembler.cpu.gc_ll_descr From noreply at buildbot.pypy.org Fri Jul 26 15:24:32 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 26 Jul 2013 15:24:32 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: document branch which allows subclassing ndarray Message-ID: <20130726132432.1CC481C00B1@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-subtype Changeset: r65686:f9bd0aa9782a Date: 2013-07-26 16:21 +0300 http://bitbucket.org/pypy/pypy/changeset/f9bd0aa9782a/ Log: document branch which allows subclassing ndarray diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,6 @@ .. branch: statvfs_tests Added some addition tests for statvfs. + +.. branch: ndarray-subtypes +Allow subclassing ndarray, i.e. 
matrix From noreply at buildbot.pypy.org Fri Jul 26 15:24:33 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 26 Jul 2013 15:24:33 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-subtype: close branch about to be merged Message-ID: <20130726132433.56DD01C00B1@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-subtype Changeset: r65687:09d1cffef5f5 Date: 2013-07-26 16:22 +0300 http://bitbucket.org/pypy/pypy/changeset/09d1cffef5f5/ Log: close branch about to be merged From noreply at buildbot.pypy.org Fri Jul 26 15:24:35 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 26 Jul 2013 15:24:35 +0200 (CEST) Subject: [pypy-commit] pypy default: merge ndarray-subtype which allows subclassing ndarray Message-ID: <20130726132435.78A5C1C00B1@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r65688:4f882fde971e Date: 2013-07-26 16:23 +0300 http://bitbucket.org/pypy/pypy/changeset/4f882fde971e/ Log: merge ndarray-subtype which allows subclassing ndarray diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,6 @@ .. branch: statvfs_tests Added some addition tests for statvfs. + +.. branch: ndarray-subtypes +Allow subclassing ndarray, i.e. matrix diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- 
a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,15 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) + else: + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + assert isinstance(w_ret, W_NDimArray) + space.call_method(w_ret, '__array_finalize__', w_instance) + return w_ret class ArrayArgumentException(Exception): pass @@ -20,10 +29,11 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +42,12 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,15 +58,20 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if w_subtype: + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, '__array_finalize__', w_subtype) + return w_ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, 
parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,14 +186,23 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + return BoolObject(True) + def type(self, w_obj): - return w_obj.tp + if self.is_none(w_obj): + return self.w_None + try: + return w_obj.tp + except AttributeError: + if isinstance(w_obj, W_NDimArray): + return W_NDimArray + return self.w_None def gettypefor(self, w_obj): return None @@ -199,6 +210,11 @@ def call_function(self, tp, w_dtype): return w_dtype + def call_method(self, w_obj, s, *args): + # XXX even the hacks have hacks + return None + #return getattr(w_obj, 'descr_' + s)(self, *args) + @specialize.arg(1) def interp_w(self, tp, what): assert isinstance(what, tp) @@ -329,6 +345,8 @@ self.name = name.strip(" ") def execute(self, interp): + if self.name == 'None': + return None return interp.variables[self.name] def __repr__(self): @@ -451,6 +469,32 @@ def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) +class ArrayClass(Node): + def __init__(self): + self.v = W_NDimArray + + def execute(self, interp): + return self.v + + def __repr__(self): + return '' + +class DtypeClass(Node): + def __init__(self, dt): + self.v = dt + + def execute(self, interp): + if self.v == 'int': + dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'float': + dtype = get_dtype_cache(interp.space).w_float64dtype + else: + raise BadToken('unknown v to dtype "%s"' % self.v) + return dtype + + def __repr__(self): + return '' % self.v + class Execute(Node): def __init__(self, expr): self.expr = expr @@ -533,6 +577,14 @@ w_res = where(interp.space, arr, arg1, arg2) else: assert False + elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: + if len(self.args) != 2: + raise ArgumentMismatch + arg = self.args[1].execute(interp) + if self.name == 'view': + w_res = arr.descr_view(interp.space, arg) + else: + assert False else: raise WrongFunctionName if isinstance(w_res, W_NDimArray): @@ -652,8 +704,14 @@ if token.name == 'identifier': if tokens.remaining() and tokens.get(0).name == 'paren_left': stack.append(self.parse_function_call(token.v, tokens)) + elif token.v.strip(' ') == 'ndarray': + stack.append(ArrayClass()) + elif token.v.strip(' ') == 'int': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'float': + stack.append(DtypeClass('float')) else: - stack.append(Variable(token.v)) + stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': stack.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'operator': diff --git a/pypy/module/micronumpy/interp_arrayops.py 
b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -131,7 +131,8 @@ arr.get_dtype()) if _axis < 0 or len(arr.get_shape()) <= _axis: raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) - res = W_NDimArray.from_shape(shape, dtype, 'C') + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: @@ -139,7 +140,7 @@ continue chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, arr.get_shape()[_axis]) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) axis_start += arr.get_shape()[_axis] return res @@ -150,22 +151,22 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(res).implementation.setslice(space, arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(res).implementation.setslice(space, arr) - return res + Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) + return w_res def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) @@ -261,7 +262,7 @@ else: shape = (shape[:axis2] + shape[axis2 + 1:axis1] + shape[axis1 + 1:] + [size]) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) if size == 0: return out if shapelen == 2: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,7 +41,7 @@ dtype = w_arr_list[0].get_dtype() for w_arr in w_arr_list[1:]: dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) - out = base.W_NDimArray.from_shape(shape, dtype) + out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -64,8 +64,8 @@ base_iter.next_skip_x(start) if length == 1: return base_iter.getitem() - res = W_NDimArray.from_shape([length], base.get_dtype(), - base.get_order()) + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, 
w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w + ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -85,8 +85,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(res_shape, self.get_dtype()) - return loop.getitem_filter(res, self, arr) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): @@ -145,12 +145,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return chunks.apply(self) + return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(shape, self.get_dtype(), self.get_order()) - if not res.get_size(): - return res - return loop.getitem_array_int(space, self, res, iter_shape, indexes, + w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), w_instance=self) + if not w_res.get_size(): + return w_res + return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, prefix) def setitem_array_int(self, space, w_index, w_value): @@ -161,7 +162,7 @@ # w_index is a list of slices w_value = convert_to_array(space, w_value) chunks = self.implementation._prepare_slice_args(space, w_index) - view = chunks.apply(self) + view = chunks.apply(space, self) view.implementation.setslice(space, w_value) return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, @@ -259,14 +260,17 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return W_NDimArray(self.implementation.copy(space)) + copy = self.implementation.copy(space) + w_subtype = space.type(self) + return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - return W_NDimArray(self.implementation.get_real(self)) + return wrap_impl(space, space.type(self), self, + self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return W_NDimArray(ret) + return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -298,7 +302,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return W_NDimArray(new_impl) + return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -326,7 +330,7 @@ """ if self.is_scalar(): return self - return self.implementation.swapaxes(self, axis1, axis2) + return self.implementation.swapaxes(space, self, axis1, axis2) def descr_tolist(self, space): if len(self.get_shape()) == 0: @@ -446,17 +450,24 @@ # we must do that, because we need a 
working set. otherwise # we would modify the array in-place. Use this to our advantage # by converting nonnative byte order. + if self.is_scalar(): + return space.wrap(0) s = self.get_dtype().name if not self.get_dtype().native: s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) - return contig.implementation.argsort(space, w_axis) + return contig.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return self.implementation.astype(space, dtype) + impl = self.implementation + if isinstance(impl, scalar.Scalar): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -471,9 +482,9 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(self.get_shape(), self.get_dtype()) - loop.byteswap(self.implementation, res.implementation) - return res + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + loop.byteswap(self.implementation, w_res.implementation) + return w_res @unwrap_spec(mode=str) def descr_choose(self, space, w_choices, w_out=None, mode='raise'): @@ -564,7 +575,7 @@ if space.is_none(w_out): if self.get_dtype().is_bool_type(): #numpy promotes bool.round() to float16. Go figure. - w_out = W_NDimArray.from_shape(self.get_shape(), + w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) else: w_out = None @@ -578,6 +589,8 @@ else: calc_dtype = out.get_dtype() + if decimals == 0: + out = out.descr_view(space,space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -619,9 +632,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if w_type is not None: - raise OperationError(space.w_NotImplementedError, space.wrap( - "view(... type=) not implemented yet")) + if not w_type and w_dtype: + try: + if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + w_type = w_dtype + w_dtype = None + except (OperationError, TypeError): + pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -651,8 +668,9 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - return W_NDimArray(impl.get_view(self, dtype, new_shape)) - + v = impl.get_view(self, dtype, new_shape) + w_ret = wrap_impl(space, w_type, self, v) + return w_ret # --------------------- operations ---------------------------- @@ -760,9 +778,9 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(out_shape, dtype) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, result, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) @unwrap_spec(w_axis = WrappedDefault(None)) @@ -884,14 +902,20 @@ isfortran = space.getitem(w_state, space.wrap(3)) storage = space.getitem(w_state, space.wrap(4)) - self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + self.implementation = W_NDimArray.from_shape_and_storage(space, + [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation + def descr___array_finalize__(self, space, w_obj): + pass - at unwrap_spec(offset=int) + at unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, w_order=None): + offset=0, w_strides=None, order='C'): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.support import calc_strides if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_order) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) @@ -900,10 +924,19 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype) - return W_NDimArray.from_shape(shape, dtype) + if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): + return W_NDimArray.from_shape(space, shape, dtype, order) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = ConcreteArray(shape, dtype.base, order, strides, + backstrides) + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_function(space.getattr(w_ret, + space.wrap('__array_finalize__')), w_subtype) + return w_ret @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
@@ -912,9 +945,17 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + if w_subtype: + if not space.isinstance_w(w_subtype, space.w_type): + raise OperationError(space.w_ValueError, space.wrap( + "subtype must be a subtype of ndarray, not a class instance")) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + 'C', False, w_subtype) + else: + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", @@ -1042,6 +1083,7 @@ W_NDimArray.fdel___pypy_data__), __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), + __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) @@ -1094,12 +1136,12 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape - arr = W_NDimArray.from_shape(shape, dtype, order=order) - arr_iter = arr.create_iter() + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) arr_iter.next() - return arr + return w_arr @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): @@ -1109,7 +1151,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order)) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1119,10 +1161,10 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - arr.fill(one) - return space.wrap(arr) + w_arr.fill(one) + return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -50,7 +50,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([num_items], dtype=dtype) + a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) ai = a.create_iter() for val in items: ai.setitem(val) @@ -71,7 +71,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([count], dtype=dtype) + a = W_NDimArray.from_shape(space, [count], dtype=dtype) loop.fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -181,7 +181,8 @@ temp_shape = obj_shape[:axis] + obj_shape[axis + 1:] if out: dtype = 
out.get_dtype() - temp = W_NDimArray.from_shape(temp_shape, dtype) + temp = W_NDimArray.from_shape(space, temp_shape, dtype, + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -207,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -216,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape([obj.get_size()], dtype) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out @@ -295,7 +296,7 @@ return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - return loop.call1(shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) @@ -370,7 +371,7 @@ return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - return loop.call2(new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) @@ -450,7 +451,7 @@ return dt2 return dt1 return dt2 - else: + else: # increase to the next signed type dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] @@ -537,7 +538,13 @@ return current_guess if current_guess is complex_type: return complex_type - return interp_dtype.get_dtype_cache(space).w_float64dtype + if space.isinstance_w(w_obj, space.w_float): + return float_type + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype + raise operationerrfmt(space.w_NotImplementedError, + 'unable to create dtype from objects, ' '"%T" instance not supported', + w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -58,11 +58,11 @@ def __init__(self, name): self.name = name - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(arr.start + ofs, arr.get_strides(), + return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), arr.get_backstrides(), arr.shape, arr, orig_arr, subdtype) @@ -81,13 +81,13 @@ assert s >= 0 return shape[:] + old_shape[s:] - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation shape = self.extend_shape(arr.shape) r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), self.l) _, start, strides, backstrides = r - return W_NDimArray.new_slice(start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, orig_arr) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,9 +19,34 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) -def call2(shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): +def call2(space, shape, func, 
calc_dtype, res_dtype, w_lhs, w_rhs, out): + # handle array_priority + # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: + # 1. if __array_priorities__ are equal and one is an ndarray and the + # other is a subtype, flip the order + # 2. elif rhs.__array_priority__ is higher, flip the order + # Now return the subtype of the first one + + w_ndarray = space.gettypefor(W_NDimArray) + lhs_type = space.type(w_lhs) + rhs_type = space.type(w_rhs) + lhs_for_subtype = w_lhs + rhs_for_subtype = w_rhs + #it may be something like a FlatIter, which is not an ndarray + if not space.is_true(space.issubtype(lhs_type, w_ndarray)): + lhs_type = space.type(w_lhs.base) + lhs_for_subtype = w_lhs.base + if not space.is_true(space.issubtype(rhs_type, w_ndarray)): + rhs_type = space.type(w_rhs.base) + rhs_for_subtype = w_rhs.base + if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): + lhs_for_subtype = rhs_for_subtype + + # TODO handle __array_priorities__ and maybe flip the order + if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, + w_instance=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -48,9 +73,9 @@ reds = ['shape', 'w_obj', 'out', 'obj_iter', 'out_iter']) -def call1(shape, func, calc_dtype, res_dtype, w_obj, out): +def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) @@ -437,12 +462,12 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape([1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - res_str.implementation.get_storage_as_int(space)) + w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): - res_str.implementation.setitem(0, iter.getitem()) + w_res_str.implementation.setitem(0, iter.getitem()) for i in range(itemsize): builder.append(res_str_casted[i]) iter.next() diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -2,7 +2,7 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, - FunctionCall, FakeSpace) + FunctionCall, FakeSpace, W_NDimArray) class TestCompiler(object): @@ -84,6 +84,7 @@ assert interp.code.statements[0] == Assignment( 'a', Operator(Variable('b'), "+", FloatConstant(3))) + class TestRunner(object): def run(self, code): interp = numpy_compile(code) @@ -290,4 +291,32 @@ ''') assert interp.results[0].real == 0 assert interp.results[0].imag == 1 - + + def test_view_none(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = None + c = view(a, b) + c -> 0 + ''') + assert interp.results[0].value == 1 + + def test_view_ndarray(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = ndarray + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + + def test_view_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 
0] + b = int + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -976,3 +976,16 @@ assert a[0] == 1 assert (a + a)[1] == 4 +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpypy import array + class Polynomial(object): + pass + try: + a = array(Polynomial()) + assert a.shape == () + except NotImplementedError, e: + if e.message.find('unable to create dtype from objects')>=0: + skip('creating ojbect dtype not supported yet') + + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,8 +24,8 @@ def get_size(self): return 1 -def create_slice(a, chunks): - return Chunks(chunks).apply(W_NDimArray(a)).implementation +def create_slice(space, a, chunks): + return Chunks(chunks).apply(space, W_NDimArray(a)).implementation def create_array(*args, **kwargs): @@ -46,100 +46,100 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] - a = create_array([1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert 
s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -207,7 +207,8 @@ raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) # dtypes = get_dtype_cache(self.space) - w_array = W_NDimArray.from_shape_and_storage([2, 2], storage, dtypes.w_int8dtype) + w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2], + storage, dtypes.w_int8dtype) def get(i, j): return w_array.getitem(self.space, [i, j]).value assert get(0, 0) == 0 @@ -1442,7 +1443,7 @@ assert x.view('int8').shape == (10, 3) def test_ndarray_view_empty(self): - from numpypy import array, int8, int16, dtype + from numpypy import array, int8, int16 x = array([], dtype=[('a', int8), ('b', int8)]) y = x.view(dtype=int16) @@ -2876,6 +2877,12 @@ assert y[0, 1] == 2 y[0, 1] = 42 assert x[1] == 42 + class C(ndarray): + pass + z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C) + assert isinstance(z, C) + assert z.shape == (4, 1) + assert 
z[1, 0] == 42 def test___pypy_data__(self): from numpypy import array @@ -2890,7 +2897,7 @@ class AppTestLongDoubleDtypes(BaseNumpyAppTest): def setup_class(cls): from pypy.module.micronumpy import Module - print dir(Module.interpleveldefs) + #print dir(Module.interpleveldefs) if not Module.interpleveldefs.get('longfloat', None): py.test.skip('no longdouble types yet') BaseNumpyAppTest.setup_class.im_func(cls) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -0,0 +1,223 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestSupport(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_NoNew = cls.space.appexec([], '''(): + from numpypy import ndarray + class NoNew(ndarray): + def __new__(cls, subtype): + raise ValueError('should not call __new__') + def __array_finalize__(self, obj): + + self.called_finalize = True + return NoNew ''') + cls.w_SubType = cls.space.appexec([], '''(): + from numpypy import ndarray, asarray + class SubType(ndarray): + def __new__(obj, input_array): + obj = asarray(input_array).view(obj) + obj.called_new = True + return obj + def __array_finalize__(self, obj): + self.called_finalize = True + return SubType ''') + + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + a = a.reshape(1, 4) + b = a.reshape(4, 1) + assert isinstance(b, C) + #make sure __new__ was not called + assert not getattr(b, 'id', None) + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 + + def test_subtype_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + assert isinstance(b, matrix) + assert (b == a).all() + + + def test_finalize(self): + #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray + import numpypy as np + class InfoArray(np.ndarray): + def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + strides=None, order='C', info=None): + obj = np.ndarray.__new__(subtype, shape, dtype, buffer, + offset, strides, order) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + print 'finalize with None' + return + # printing the object itself will crash the test + print 'finalize with something',type(obj) + self.info = getattr(obj, 'info', None) + obj = InfoArray(shape=(3,)) + assert isinstance(obj, InfoArray) + assert obj.info is None + obj = InfoArray(shape=(3,), info='information') + assert obj.info == 'information' + v = obj[1:] + assert isinstance(v, InfoArray) + assert v.base is obj + assert v.info == 'information' + arr = np.arange(10) + cast_arr = arr.view(InfoArray) + assert isinstance(cast_arr, InfoArray) + assert cast_arr.base is arr + assert cast_arr.info is None + + def test_sub_where(self): + from numpypy import where, ones, zeros, array + a = array([1, 2, 3, 0, -3]) + v = 
a.view(self.NoNew) + b = where(array(v) > 0, ones(5), zeros(5)) + assert (b == [1, 1, 1, 0, 0]).all() + # where returns an ndarray irregardless of the subtype of v + assert not isinstance(b, self.NoNew) + + def test_sub_repeat(self): + from numpypy import repeat, array + a = self.SubType(array([[1, 2], [3, 4]])) + b = repeat(a, 3) + assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() + assert isinstance(b, self.SubType) + + def test_sub_flatiter(self): + from numpypy import array + a = array(range(9)).reshape(3, 3).view(self.NoNew) + c = array(range(9)).reshape(3, 3) + assert isinstance(a.flat[:] + a.flat[:], self.NoNew) + assert isinstance(a.flat[:] + c.flat[:], self.NoNew) + assert isinstance(c.flat[:] + a.flat[:], self.NoNew) + assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) + + def test_sub_getitem_filter(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + c = b[array([False, True, False, True, False])] + assert c.shape == (2,) + assert (c == [1, 3]).all() + assert isinstance(c, self.SubType) + assert b.called_new + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_getitem_array_int(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + assert b.called_new + c = b[array([3, 2, 1, 4])] + assert (c == [3, 2, 1, 4]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_round(self): + from numpypy import array + a = array(range(10), dtype=float).view(self.NoNew) + # numpy compatibility + b = a.round(decimals=0) + assert isinstance(b, self.NoNew) + b = a.round(decimals=1) + assert not isinstance(b, self.NoNew) + b = a.round(decimals=-1) + assert not isinstance(b, self.NoNew) + + def test_sub_dot(self): + # the returned type is that of the first argument + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = array(range(12)).reshape(4,3).view(self.SubType) + d = c.dot(a) + assert isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert d.called_finalize + d = a.dot(c) + assert not isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert not getattr(d, 'called_finalize', False) + + def test_sub_reduce(self): + # i.e. sum, max + # test for out as well + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = b.sum(axis=0) + assert (c == [12, 15, 18, 21]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + d = array(range(4)) + c = b.sum(axis=0, out=d) + assert c is d + assert not isinstance(c, self.SubType) + d = array(range(4)).view(self.NoNew) + c = b.sum(axis=0, out=d) + assert c is d + assert isinstance(c, self.NoNew) + + def test_sub_call2(self): + # c + a vs. a + c, what about array priority? 
+ from numpypy import array + a = array(range(12)).view(self.NoNew) + b = self.SubType(range(12)) + c = b + a + assert isinstance(c, self.SubType) + c = a + b + assert isinstance(c, self.NoNew) + d = range(12) + e = a - d + assert isinstance(e, self.NoNew) + + def test_sub_call1(self): + from numpypy import array, sqrt + a = array(range(12)).view(self.NoNew) + b = sqrt(a) + assert b.called_finalize == True + + def test_sub_astype(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.astype(float) + assert b.called_finalize == True + + def test_sub_reshape(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.reshape(3, 4) + assert b.called_finalize == True + From noreply at buildbot.pypy.org Fri Jul 26 15:55:39 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 26 Jul 2013 15:55:39 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: bah Message-ID: <20130726135539.8EF671C00B1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65689:fc056395b86d Date: 2013-07-26 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/fc056395b86d/ Log: bah diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2204,7 +2204,6 @@ assert isinstance(descr, STMBarrierDescr) assert descr.returns_modified_object loc_base = arglocs[0] - temp_loc = arglocs[1] assert isinstance(loc_base, RegLoc) helper_num = 0 @@ -2247,6 +2246,8 @@ if isinstance(descr, STMReadBarrierDescr): # calculate: temp = obj & FX_MASK assert StmGC.FX_MASK == 65535 + assert not is_frame + temp_loc = arglocs[1] # does not exist if is_frame! mc.MOVZX16(temp_loc, loc_base) # calculate: rbc + temp == obj rbc = self._get_stm_read_barrier_cache_addr() From noreply at buildbot.pypy.org Fri Jul 26 15:55:40 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 26 Jul 2013 15:55:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix Message-ID: <20130726135540.C7D3A1C00B1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65690:1f7031417ccb Date: 2013-07-26 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/1f7031417ccb/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2260,15 +2260,14 @@ # obj->h_tid & GCFLAG_WRITE_BARRIER) != 0 if isinstance(descr, STMWriteBarrierDescr): + assert IS_X86_64 and (StmGC.GCFLAG_WRITE_BARRIER >> 32) > 0 + assert (StmGC.GCFLAG_WRITE_BARRIER >> 40) == 0 + off = 4 + flag = StmGC.GCFLAG_WRITE_BARRIER >> 32 if loc_base == ebp: - #mc.MOV_rb(X86_64_SCRATCH_REG.value, StmGC.H_TID) - mc.TEST8_bi(StmGC.H_TID, StmGC.GCFLAG_WRITE_BARRIER) + mc.TEST8_bi(StmGC.H_TID + off, flag) else: - # mc.MOV(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_TID)) - mc.TEST8_mi((loc_base.value, StmGC.H_TID), - StmGC.GCFLAG_WRITE_BARRIER) - #doesn't work: - # mc.TEST(X86_64_SCRATCH_REG, imm(StmGC.GCFLAG_WRITE_BARRIER)) + mc.TEST8_mi((loc_base.value, StmGC.H_TID + off), flag) mc.J_il8(rx86.Conditions['NZ'], 0) # patched below jnz_location2 = mc.get_relative_pos() diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -49,8 +49,8 @@ GCFLAG_STUB = first_gcflag << 8 # debug GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 GCFLAG_HAS_ID = first_gcflag << 10 - GCFLAG_IMMUTABLE = first_gcflag << 11; - 
GCFLAG_SMALLSTUB = first_gcflag << 12; + GCFLAG_IMMUTABLE = first_gcflag << 11 + GCFLAG_SMALLSTUB = first_gcflag << 12 PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 8) PREBUILT_REVISION = r_uint(1) From noreply at buildbot.pypy.org Fri Jul 26 16:31:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 16:31:09 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix the assert Message-ID: <20130726143109.8EB091C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r451:565a8b79ab12 Date: 2013-07-26 16:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/565a8b79ab12/ Log: Fix the assert diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -569,7 +569,8 @@ gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); - assert(stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); + assert((P->h_tid & GCFLAG_STUB) || + stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a risk of overrunning the object later in gcpage.c when copying a stub over it. However such objects are so small that they contain no field From noreply at buildbot.pypy.org Fri Jul 26 16:31:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 16:31:10 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Remove a memcpy() which is not needed any more, I believe Message-ID: <20130726143110.A3F891C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r452:e8704368e5f9 Date: 2013-07-26 16:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/e8704368e5f9/ Log: Remove a memcpy() which is not needed any more, I believe diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -105,12 +105,9 @@ p, (gcptr)p->h_original)); } else { - /* must create shadow original object XXX: or use - backup, if exists */ + /* must create shadow original object + (XXX could use the backup, if it exists) */ gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); - memcpy(O, p, stmgc_size(p)); /* at least major collections - depend on some content of id_copy. - remove after fixing that XXX */ O->h_tid |= GCFLAG_OLD; p->h_original = (revision_t)O; From noreply at buildbot.pypy.org Fri Jul 26 16:31:11 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 16:31:11 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix. Message-ID: <20130726143111.B3D3A1C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r453:ba5331ab45cc Date: 2013-07-26 16:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/ba5331ab45cc/ Log: Fix. diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -291,25 +291,28 @@ h_original. Or, if gcp != NULL and the most recent copy is protected by precisely 'gcp', then we return it instead. */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + gcptr original; if (obj->h_original != 0 && - !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { original = (gcptr)obj->h_original; + /* the h_original may be protected, or private_from_protected, + in some cases. Then we can't use it. We'll use the most + recent h_revision which is public. */ + if (!(original->h_tid & GCFLAG_PUBLIC)) + original = NULL; + } else original = obj; - /* the original object must also be a public object, and cannot - be a small stub. 
*/ - assert(original->h_tid & GCFLAG_PUBLIC); - assert(!(original->h_tid & GCFLAG_SMALLSTUB)); - - assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(original->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(original->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + /* the original object must not be a small stub. */ + assert(original == NULL || !(original->h_tid & GCFLAG_SMALLSTUB)); /* if 'original' was already visited, we are done */ - if (original->h_tid & GCFLAG_VISITED) + if (original != NULL && original->h_tid & GCFLAG_VISITED) return original; /* walk to the head of the chained list */ @@ -368,9 +371,17 @@ } } - /* copy obj over original */ - if (obj != original) + /* at this point, 'obj' contains the most recent revision which is + public. */ + if (original == NULL) { + original = obj; + if (original->h_tid & GCFLAG_VISITED) + return original; + } + else if (obj != original) { + /* copy obj over original */ copy_over_original(obj, original); + } /* return this original */ original->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; From noreply at buildbot.pypy.org Fri Jul 26 16:31:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 16:31:16 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix test Message-ID: <20130726143116.37BB81C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r454:f85126cb4b2c Date: 2013-07-26 16:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/f85126cb4b2c/ Log: Fix test diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -80,10 +80,7 @@ def test_prebuilt_is_public(): p = palloc(HDR) assert p.h_revision == 1 - assert p.h_tid == lib.gettid(p) | (GCFLAG_OLD | - GCFLAG_VISITED | - GCFLAG_PUBLIC | - GCFLAG_PREBUILT_ORIGINAL) + assert p.h_tid == lib.gettid(p) | lib.PREBUILT_FLAGS assert classify(p) == "public" assert lib.stm_id(p) != 0 From noreply at buildbot.pypy.org Fri Jul 26 16:59:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 16:59:12 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix (probably, but it seems confirmed by the fact that test_random doesn't Message-ID: <20130726145912.B9D661C002A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r455:e05ca53c99a1 Date: 2013-07-26 16:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/e05ca53c99a1/ Log: Fix (probably, but it seems confirmed by the fact that test_random doesn't complain about unexpected aborts) diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -593,7 +593,6 @@ items = d->list_of_read_objects.items; for (i = d->list_of_read_objects.size - 1; i >= 0; --i) { gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_STUB)); if (obj->h_tid & GCFLAG_MOVED) { assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -616,7 +615,7 @@ } revision_t v = obj->h_revision; - if (IS_POINTER(v)) { + if ((obj->h_tid & GCFLAG_STUB) || IS_POINTER(v)) { /* has a more recent revision. Oups. */ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); From noreply at buildbot.pypy.org Fri Jul 26 17:00:35 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 26 Jul 2013 17:00:35 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add more specializations. 
Message-ID: <20130726150035.833E01C002A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65691:ecbc6045476e Date: 2013-07-26 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/ecbc6045476e/ Log: Add more specializations. diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -95,7 +95,7 @@ def descr_len(self, space): return space.wrap(self._len()) - @specialize.argtype(0) + #@specialize.argtype(0) #def descr_iter(self, space): # pass @@ -645,6 +645,7 @@ return space.w_False return space.newbool(self._startswith(space, value, w_prefix, start, end)) + @specialize.argtype(0) def _startswith(self, space, value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) @@ -660,6 +661,7 @@ return space.w_False return space.newbool(self._endswith(space, value, w_suffix, start, end)) + @specialize.argtype(0) def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -313,6 +313,7 @@ raise operationerrfmt(space.w_TypeError, "decoder did not return an unicode object (type '%s')", space.type(w_retval).getname(space)) + assert isinstance(w_retval, W_UnicodeObject) return w_retval def unicode_from_object(space, w_obj): From noreply at buildbot.pypy.org Fri Jul 26 17:09:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 17:09:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Ambiguous grammar (thanks Simon) Message-ID: <20130726150901.85DE01C002A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65692:e46fd79aed8e Date: 2013-07-26 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e46fd79aed8e/ Log: Ambiguous grammar (thanks Simon) diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html From noreply at buildbot.pypy.org Fri Jul 26 18:54:25 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 26 Jul 2013 18:54:25 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: meh, revert 3fc9da9637c3 Message-ID: <20130726165425.D67EB1C318F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65693:63f3ab72fb5e Date: 2013-07-26 17:53 +0100 http://bitbucket.org/pypy/pypy/changeset/63f3ab72fb5e/ Log: meh, revert 3fc9da9637c3 Breaks test collection (e.g. "py.test rpython/" collects *nothing*) but fixes buildbot failures. 
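The hook being backed out here is py.test's generic collection filter. As a rough illustration (not the actual PyPy ARM conftest, and using platform.machine() instead of PyPy's own cpu string), a conftest.py that hides a platform-specific test subtree with that hook looks like this:

    # conftest.py -- sketch only, not the PyPy ARM conftest
    import platform

    def pytest_ignore_collect(path, config):
        # Returning True tells py.test not to collect anything below the
        # directory that holds this conftest; returning False or None
        # lets collection proceed normally.
        return not platform.machine().lower().startswith('arm')

The visible difference between the two approaches is that an ignored path simply vanishes from the collection tree, whereas the pytest_collect_directory/pytest_collect_file variant restored in the diff below raises a collection-time skip, so the ARM tests still show up in the run as skipped.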
diff --git a/rpython/jit/backend/arm/test/conftest.py b/rpython/jit/backend/arm/test/conftest.py --- a/rpython/jit/backend/arm/test/conftest.py +++ b/rpython/jit/backend/arm/test/conftest.py @@ -16,5 +16,7 @@ dest="run_translation_tests", help="run tests that translate code") -def pytest_ignore_collect(path, config): - return not cpu.startswith('arm') +def pytest_collect_directory(path, parent): + if not cpu.startswith('arm'): + py.test.skip("ARM(v7) tests skipped: cpu is %r" % (cpu,)) +pytest_collect_file = pytest_collect_directory From noreply at buildbot.pypy.org Fri Jul 26 19:12:04 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 26 Jul 2013 19:12:04 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: remove jvm leftovers Message-ID: <20130726171204.388731C3195@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65694:480b7924a59a Date: 2013-07-26 18:11 +0100 http://bitbucket.org/pypy/pypy/changeset/480b7924a59a/ Log: remove jvm leftovers diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -529,6 +529,7 @@ goals = self.backend_select_goals(goals) return self._execute(goals, task_skip = self._maybe_skip()) + @staticmethod def from_targetspec(targetspec_dic, config=None, args=None, empty_translator=None, disable=[], @@ -538,13 +539,6 @@ driver = TranslationDriver(config=config, default_goal=default_goal, disable=disable) - # patch some attributes of the os module to make sure they - # have the same value on every platform. - backend, ts = driver.get_backend_and_type_system() - if backend in ('cli', 'jvm'): - from rpython.translator.oosupport.support import patch_os - driver.old_cli_defs = patch_os() - target = targetspec_dic['target'] spec = target(driver, args) @@ -558,11 +552,8 @@ policy=policy, extra=targetspec_dic, empty_translator=empty_translator) - return driver - from_targetspec = staticmethod(from_targetspec) - def prereq_checkpt_rtype(self): assert 'rpython.rtyper.rmodel' not in sys.modules, ( "cannot fork because the rtyper has already been imported") diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -127,14 +127,3 @@ self.ensure_backend('c') self.driver.compile_c() return self.driver.c_entryp - - def compile_jvm(self, **kwds): - self.update_options(kwds) - self.ensure_backend('jvm') - self.driver.compile_jvm() - return self.driver.c_entryp - - def source_jvm(self, **kwds): - self.update_options(kwds) - self.ensure_backend('jvm') - self.driver.source_jvm() From noreply at buildbot.pypy.org Fri Jul 26 19:28:38 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 26 Jul 2013 19:28:38 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill rjvm Message-ID: <20130726172838.9A7D11C31BA@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65695:18f60cc5840e Date: 2013-07-26 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/18f60cc5840e/ Log: Kill rjvm diff --git a/rpython/rlib/rjvm.py b/rpython/rlib/rjvm.py deleted file mode 100644 --- a/rpython/rlib/rjvm.py +++ /dev/null @@ -1,79 +0,0 @@ - -import jpype -import atexit - - -class CallWrapper(object): - - def wrap_item(self, item): - if isinstance(item, jpype.java.lang.Object): - return JavaInstanceWrapper(item) - elif isinstance(item, jpype._jclass._JavaClass): - return JavaInstanceWrapper(item.__javaclass__) - elif isinstance(item, tuple) or 
isinstance(item, list): - return self.wrap_list(item) - return item - - def wrap_list(self, lst): - return [self.wrap_item(x) for x in lst] - - def __call__(self, *args, **kwargs): - result = self.__wrapped__(*args, **kwargs) - return self.wrap_item(result) - - -class JavaWrapper(CallWrapper): - def __init__(self, name): - self.__javaname__ = name - all_names = name.split(".") - temp_module = jpype - for n in all_names: - temp_module = getattr(temp_module, n) - self.__wrapped__ = temp_module - def __getattr__(self, attr): - if isinstance(getattr(self.__wrapped__, attr), type): - return JavaClassWrapper(getattr(self.__wrapped__, attr)) - elif isinstance(getattr(self.__wrapped__, attr), jpype.JPackage): - return JavaWrapper(self.__javaname__ + '.' + attr) - -class JavaInstanceWrapper(object): - def __init__(self, obj): - self.__wrapped__ = obj - - def __getattr__(self, attr): - return JavaMethodWrapper(getattr(self.__wrapped__, attr)) - -class JavaClassWrapper(CallWrapper): - def __init__(self, cls): - self.__wrapped__ = cls - - def __getattr__(self, attr): - result = None - try: - result = JavaStaticMethodWrapper(getattr(self.__wrapped__, attr)) - except AttributeError: - result = JavaStaticMethodWrapper(getattr(self.__wrapped__.__javaclass__, attr)) - return result - -class JavaMethodWrapper(CallWrapper): - - def __init__(self, meth): - self.__wrapped__ = meth - -class JavaStaticMethodWrapper(CallWrapper): - def __init__(self, static_meth): - self.__wrapped__ = static_meth - - - -jpype.startJVM(jpype.getDefaultJVMPath(), "-ea") -java = JavaWrapper("java") -JavaMethod = type(jpype.java.lang.Math.abs) - - -def cleanup(): - jpype.shutdownJVM() - -atexit.register(cleanup) - - diff --git a/rpython/rlib/test/test_rjvm.py b/rpython/rlib/test/test_rjvm.py deleted file mode 100644 --- a/rpython/rlib/test/test_rjvm.py +++ /dev/null @@ -1,44 +0,0 @@ -import py -py.test.skip('this is outdated. 
Check the jvm-improvements branch') - -try: - import jpype -except ImportError: - py.test.skip("In Progress...") - -from rpython.rlib.rjvm import java, JavaWrapper, JavaClassWrapper, JavaInstanceWrapper, JavaMethodWrapper, JavaStaticMethodWrapper - -def test_static_method(): - assert isinstance(java.lang, JavaWrapper) - assert isinstance(java.lang.Math, JavaClassWrapper) - assert isinstance(java.lang.Math.abs, JavaStaticMethodWrapper) - result = java.lang.Math.abs(-42) - assert isinstance(result, int) - assert result == 42 - -def test_class_instantiate(): - al = java.util.ArrayList() - assert isinstance(al, JavaInstanceWrapper) - assert isinstance(al.add, JavaMethodWrapper) - al.add("test") - assert al.get(0) == "test" - -def test_reflection(): - py.test.skip('in progress') - al_class = java.lang.Class.forName("java.util.ArrayList") - assert isinstance(al_class, JavaInstanceWrapper) - #meths = al_class.__javaclass__.getDeclaredMethods() - constructors = al_class.getDeclaredConstructors() - meths = al_class.getDeclaredMethods() - al = constructors[0].newInstance([]) - al_org = java.util.ArrayList() - assert isinstance(al, JavaInstanceWrapper) - assert isinstance(al.add, JavaMethodWrapper) - al_add = meths[2] - assert isinstance(al_add, JavaInstanceWrapper) - assert isinstance(al_add.invoke, JavaMethodWrapper) - # This fail, but work on the command line - al_add.invoke(al_org, ["Hello"]) - assert al_org[0] == "Hello" - al_add.invoke(al, ["Hello"]) - assert al[0] == "Hello" From noreply at buildbot.pypy.org Fri Jul 26 20:01:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:01:02 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Two extra passing tests Message-ID: <20130726180102.057681C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r456:0cbcc23311d3 Date: 2013-07-26 19:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/0cbcc23311d3/ Log: Two extra passing tests diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py --- a/c4/test/test_gcpage.py +++ b/c4/test/test_gcpage.py @@ -532,3 +532,64 @@ check_not_free(ffi.cast("gcptr", p2c.h_original)) assert p2c == p2b assert ffi.cast("gcptr", p2c.h_original) == p2 + +def test_more_h_original_1(): + p2 = oalloc_refs(1); make_public(p2) + lib.stm_push_root(p2) + def f1(_): + p1 = oalloc(HDR + WORD) + lib.rawsetlong(p1, 0, -8922908) + setptr(p2, 0, p1) + run_parallel(f1) + p2 = lib.stm_pop_root() + p1 = getptr(p2, 0) + assert classify(p1) == "stub" + assert not lib.in_nursery(p1) + p1org = p1.h_original + assert p1org != 0 + assert classify(ffi.cast("gcptr", p1org)) == "protected" + assert not lib.in_nursery(ffi.cast("gcptr", p1org)) + assert lib.getlong(p1, 0) == -8922908 + p1r = lib.stm_read_barrier(p1) # protected->public + assert classify(p1r) == "public" + assert p1r == ffi.cast("gcptr", p1org) + assert p1r.h_original == 0 + # + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + check_not_free(ffi.cast("gcptr", p1org)) + p1 = getptr(p2, 0) + assert p1 == p1r + assert p1.h_original == 0 + assert classify(p1) == "public" + assert ffi.cast("gcptr", p1org) == p1 + assert lib.getlong(p1, 0) == -8922908 + +def test_more_h_original_2(): + p2 = oalloc_refs(1); make_public(p2) + lib.stm_push_root(p2) + def f1(_): + p1 = oalloc(HDR + WORD) + lib.rawsetlong(p1, 0, -8922908) + setptr(p2, 0, p1) + run_parallel(f1) + p2 = lib.stm_pop_root() + p1 = getptr(p2, 0) + assert classify(p1) == "stub" + assert not lib.in_nursery(p1) + p1org = p1.h_original + assert 
p1org != 0 + assert classify(ffi.cast("gcptr", p1org)) == "protected" + assert not lib.in_nursery(ffi.cast("gcptr", p1org)) + # + lib.stm_push_root(p2) + major_collect() + p2 = lib.stm_pop_root() + check_not_free(p1) + check_not_free(ffi.cast("gcptr", p1org)) + p1b = getptr(p2, 0) + assert p1b == p1 + assert p1b.h_original == p1org + assert classify(p1b) == "stub" + assert lib.getlong(p1, 0) == -8922908 From noreply at buildbot.pypy.org Fri Jul 26 20:01:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:01:03 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: I think this is necessary, but I don't manage to write a test Message-ID: <20130726180103.33C841C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r457:bebb0740fe44 Date: 2013-07-26 19:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/bebb0740fe44/ Log: I think this is necessary, but I don't manage to write a test diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -302,8 +302,10 @@ /* the h_original may be protected, or private_from_protected, in some cases. Then we can't use it. We'll use the most recent h_revision which is public. */ - if (!(original->h_tid & GCFLAG_PUBLIC)) + if (!(original->h_tid & GCFLAG_PUBLIC)) { + original->h_tid |= GCFLAG_MARKED; original = NULL; + } } else original = obj; From noreply at buildbot.pypy.org Fri Jul 26 20:01:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:01:04 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: hg backout 5c385b2b629a: it's still needed Message-ID: <20130726180104.489E01C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r458:efe729c34c6f Date: 2013-07-26 19:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/efe729c34c6f/ Log: hg backout 5c385b2b629a: it's still needed diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -105,9 +105,12 @@ p, (gcptr)p->h_original)); } else { - /* must create shadow original object - (XXX could use the backup, if it exists) */ + /* must create shadow original object XXX: or use + backup, if exists */ gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p)); + memcpy(O, p, stmgc_size(p)); /* at least major collections + depend on some content of id_copy. 
+ remove after fixing that XXX */ O->h_tid |= GCFLAG_OLD; p->h_original = (revision_t)O; From noreply at buildbot.pypy.org Fri Jul 26 20:01:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:01:05 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Increase the size, now that the shadowstacks are also allocated via dbgmem.c Message-ID: <20130726180105.990391C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r459:06d2d4b24b7e Date: 2013-07-26 19:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/06d2d4b24b7e/ Log: Increase the size, now that the shadowstacks are also allocated via dbgmem.c diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -8,7 +8,7 @@ #ifdef _GC_DEBUG /************************************************************/ -#define MMAP_TOTAL 671088640 /* 640MB */ +#define MMAP_TOTAL 1280*1024*1024 /* 1280MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; static char *zone_start, *zone_current = NULL, *zone_end = NULL; From noreply at buildbot.pypy.org Fri Jul 26 20:04:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:04:43 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: All tests pass including test_zdemo_random now. Message-ID: <20130726180443.97CD41C0413@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r460:ae8598df354b Date: 2013-07-26 20:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/ae8598df354b/ Log: All tests pass including test_zdemo_random now. diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -525,6 +525,6 @@ test_multi_thread(1624) def test_more_multi_thread(): - #py.test.skip("more random tests") - for i in range(200): + py.test.skip("more random tests") + for i in range(100): yield test_multi_thread, 1100 + i From noreply at buildbot.pypy.org Fri Jul 26 20:20:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:20:47 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Set the debugging flag SMALLSTUB if we allocated a small stub here. Message-ID: <20130726182047.89F741C002A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r461:b23bacf59968 Date: 2013-07-26 20:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/b23bacf59968/ Log: Set the debugging flag SMALLSTUB if we allocated a small stub here. diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -104,6 +104,8 @@ stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; + if (size == 0) + stub->h_tid |= GCFLAG_SMALLSTUB; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { stub->h_original = obj->h_original; From noreply at buildbot.pypy.org Fri Jul 26 20:48:48 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 26 Jul 2013 20:48:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Update the irc topic list (see https://bitbucket.org/pypy/chatlog). Message-ID: <20130726184848.038BD1C002A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65696:ea4433a5cfef Date: 2013-07-26 20:47 +0200 http://bitbucket.org/pypy/pypy/changeset/ea4433a5cfef/ Log: Update the irc topic list (see https://bitbucket.org/pypy/chatlog). 
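The entries added below are stored rot13-encoded; the trailing "from string import ascii_uppercase, ascii_lowercase" context line at the bottom of the diff belongs to the module's own rot13 handling. A minimal decoder sketch -- not the module's own implementation, and assuming Python 2 as used by lib_pypy at the time:

    # rot13 decoding sketch; rot13 is its own inverse, so the same
    # translation table both encodes and decodes
    from string import maketrans, ascii_uppercase as U, ascii_lowercase as L

    ROT13 = maketrans(U + L, U[13:] + U[:13] + L[13:] + L[:13])

    def decode(line):
        return line.translate(ROT13)

    assert decode("fabj, fabj!") == "snow, snow!"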
diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. 
vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase From noreply at buildbot.pypy.org Fri Jul 26 22:36:50 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 26 Jul 2013 22:36:50 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130726203650.2EABD1C0219@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65697:9f6ccdf73b59 Date: 2013-07-26 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/9f6ccdf73b59/ Log: Fix. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -189,13 +189,13 @@ def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._startswith(space, value, w_prefix, start, end) + return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) return StringMethods._startswith(self, space, value, w_prefix, start, end) def _endswith(self, space, value, w_suffix, start, end): if space.isinstance_w(w_suffix, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._endswith(space, value, w_suffix, start, end) + return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) return StringMethods._endswith(self, space, value, w_suffix, start, end) def _join_return_one(self, space, w_obj): From noreply at buildbot.pypy.org Fri Jul 26 22:36:51 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 26 Jul 2013 22:36:51 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130726203651.7E6B71C0219@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65698:b14c953c9702 Date: 2013-07-26 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/b14c953c9702/ Log: Fix. 
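The interesting part of the diff below is the new _empty class attribute next to the existing _builder one: the step-slicing path in the shared StringMethods code now joins with self._empty instead of a hard-coded '' literal, since RPython's annotator does not let str and unicode mix, so the join separator has to match the item type of the concrete subclass. A toy version of the pattern, with invented names and in plain Python 2 rather than RPython:

    # sketch of one shared method picking up per-class constants so that
    # str- and unicode-backed objects each build results of their own kind
    class StringMethods(object):
        def every_other(self):
            value = self._value
            chars = [value[i] for i in range(0, len(value), 2)]
            return self._new(self._empty.join(chars))

    class W_Bytes(StringMethods):
        _empty = ''
        def __init__(self, value):
            self._value = value
        def _new(self, value):
            return W_Bytes(value)

    class W_Unicode(StringMethods):
        _empty = u''
        def __init__(self, value):
            self._value = value
        def _new(self, value):
            return W_Unicode(value)

    assert W_Bytes('abcdef').every_other()._value == 'ace'
    assert W_Unicode(u'abcdef').every_other()._value == u'ace'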
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -44,6 +44,7 @@ assert len(char) == 1 return str(char)[0] + _empty = '' _builder = StringBuilder def _newlist_unwrapped(self, space, res): diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -86,6 +86,7 @@ assert len(char) == 1 return str(char)[0] + _empty = '' _builder = StringBuilder def _isupper(self, ch): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -144,7 +144,7 @@ assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - str = "".join([selfvalue[start + i*step] for i in range(sl)]) + str = self._empty.join([selfvalue[start + i*step] for i in range(sl)]) return self._new(str) index = space.getindex_w(w_index, space.w_IndexError, "string index") diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -87,6 +87,7 @@ assert len(char) == 1 return unicode(char)[0] + _empty = u'' _builder = UnicodeBuilder def _isupper(self, ch): From noreply at buildbot.pypy.org Fri Jul 26 22:36:52 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 26 Jul 2013 22:36:52 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130726203652.BF9BA1C0219@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65699:7218f93f845b Date: 2013-07-26 22:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7218f93f845b/ Log: Fix. diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -144,8 +144,9 @@ w_format_spec = space.call_function(space.w_unicode, w_format_spec) spec = space.unicode_w(w_format_spec) formatter = newformat.unicode_formatter(space, spec) - return formatter.format_string(unicode_from_object(space, self)) - #return formatter.format_string(space.unicode_w(self)) + self2 = unicode_from_object(space, self) + assert isinstance(self2, W_UnicodeObject) + return formatter.format_string(self2._value) def descr_mod(self, space, w_values): return mod_format(space, self, w_values, do_unicode=True) From noreply at buildbot.pypy.org Sat Jul 27 03:44:14 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 03:44:14 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130727014414.5A64E1C002A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65700:1b8a81ea64cc Date: 2013-07-26 22:47 +0200 http://bitbucket.org/pypy/pypy/changeset/1b8a81ea64cc/ Log: hg merge default diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? 
+ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. 
Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -3,14 +3,20 @@ =============== We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. -This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html Highlights ========== +* Support for os.statvfs and os.fstatvfs on unix systems. + * Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). -* Fixed issue `1552`_: GreenletExit should inherit from BaseException +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. * Fixed issue `1537`_: numpypy __array_interface__ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,18 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. + +.. branch: ndarray-subtypes +Allow subclassing ndarray, i.e. matrix diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -556,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. 
# + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script = getscript(""" @@ -706,6 +706,20 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +985,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from pypy.interpreter.error import 
operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -324,8 +324,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -140,6 +140,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -219,8 +224,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError, e: print e - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: raise AssertionError("did not fail??") diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return 
iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,15 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) + else: + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + assert isinstance(w_ret, W_NDimArray) + space.call_method(w_ret, '__array_finalize__', w_instance) + return w_ret class ArrayArgumentException(Exception): pass 
@@ -20,10 +29,11 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +42,12 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,15 +58,20 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if w_subtype: + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, '__array_finalize__', w_subtype) + return w_ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,14 +186,23 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + return BoolObject(True) + def type(self, w_obj): - return w_obj.tp + if self.is_none(w_obj): + return self.w_None + try: + return w_obj.tp + except AttributeError: + if isinstance(w_obj, W_NDimArray): + return W_NDimArray + return self.w_None def gettypefor(self, w_obj): return None @@ -199,6 +210,11 @@ def call_function(self, tp, w_dtype): return w_dtype + def call_method(self, w_obj, s, *args): + # XXX even the hacks have hacks + return None + #return getattr(w_obj, 'descr_' + s)(self, *args) + @specialize.arg(1) def interp_w(self, tp, what): assert isinstance(what, tp) @@ -329,6 +345,8 @@ self.name = name.strip(" ") def execute(self, interp): + if self.name == 'None': + return None return interp.variables[self.name] def __repr__(self): @@ -451,6 +469,32 @@ def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) +class ArrayClass(Node): + def __init__(self): + self.v = W_NDimArray + + def execute(self, interp): + return self.v + + def __repr__(self): + return '' + +class DtypeClass(Node): + def 
__init__(self, dt): + self.v = dt + + def execute(self, interp): + if self.v == 'int': + dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'float': + dtype = get_dtype_cache(interp.space).w_float64dtype + else: + raise BadToken('unknown v to dtype "%s"' % self.v) + return dtype + + def __repr__(self): + return '' % self.v + class Execute(Node): def __init__(self, expr): self.expr = expr @@ -533,6 +577,14 @@ w_res = where(interp.space, arr, arg1, arg2) else: assert False + elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: + if len(self.args) != 2: + raise ArgumentMismatch + arg = self.args[1].execute(interp) + if self.name == 'view': + w_res = arr.descr_view(interp.space, arg) + else: + assert False else: raise WrongFunctionName if isinstance(w_res, W_NDimArray): @@ -652,8 +704,14 @@ if token.name == 'identifier': if tokens.remaining() and tokens.get(0).name == 'paren_left': stack.append(self.parse_function_call(token.v, tokens)) + elif token.v.strip(' ') == 'ndarray': + stack.append(ArrayClass()) + elif token.v.strip(' ') == 'int': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'float': + stack.append(DtypeClass('float')) else: - stack.append(Variable(token.v)) + stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': stack.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'operator': diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -131,7 +131,8 @@ arr.get_dtype()) if _axis < 0 or len(arr.get_shape()) <= _axis: raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) - res = W_NDimArray.from_shape(shape, dtype, 'C') + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: @@ -139,7 +140,7 @@ continue chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, arr.get_shape()[_axis]) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) axis_start += arr.get_shape()[_axis] return res @@ -150,22 +151,22 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(res).implementation.setslice(space, arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(res).implementation.setslice(space, arr) - return res + 
Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) + return w_res def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) @@ -261,7 +262,7 @@ else: shape = (shape[:axis2] + shape[axis2 + 1:axis1] + shape[axis1 + 1:] + [size]) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) if size == 0: return out if shapelen == 2: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,7 +41,7 @@ dtype = w_arr_list[0].get_dtype() for w_arr in w_arr_list[1:]: dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) - out = base.W_NDimArray.from_shape(shape, dtype) + out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -64,8 +64,8 @@ base_iter.next_skip_x(start) if length == 1: return base_iter.getitem() - res = W_NDimArray.from_shape([length], base.get_dtype(), - base.get_order()) + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w + ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -85,8 +85,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(res_shape, self.get_dtype()) - return loop.getitem_filter(res, self, arr) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): @@ -145,12 +145,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return chunks.apply(self) + return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(shape, self.get_dtype(), self.get_order()) - if not res.get_size(): - return res - return loop.getitem_array_int(space, self, res, iter_shape, indexes, + w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), w_instance=self) + if not w_res.get_size(): + return w_res + return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, prefix) def setitem_array_int(self, space, w_index, w_value): @@ -161,7 +162,7 @@ # w_index is a list of slices w_value = convert_to_array(space, w_value) chunks = self.implementation._prepare_slice_args(space, w_index) - view = chunks.apply(self) + view = chunks.apply(space, self) 
view.implementation.setslice(space, w_value) return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, @@ -259,14 +260,17 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return W_NDimArray(self.implementation.copy(space)) + copy = self.implementation.copy(space) + w_subtype = space.type(self) + return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - return W_NDimArray(self.implementation.get_real(self)) + return wrap_impl(space, space.type(self), self, + self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return W_NDimArray(ret) + return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -298,7 +302,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return W_NDimArray(new_impl) + return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -326,7 +330,7 @@ """ if self.is_scalar(): return self - return self.implementation.swapaxes(self, axis1, axis2) + return self.implementation.swapaxes(space, self, axis1, axis2) def descr_tolist(self, space): if len(self.get_shape()) == 0: @@ -446,17 +450,24 @@ # we must do that, because we need a working set. otherwise # we would modify the array in-place. Use this to our advantage # by converting nonnative byte order. + if self.is_scalar(): + return space.wrap(0) s = self.get_dtype().name if not self.get_dtype().native: s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) - return contig.implementation.argsort(space, w_axis) + return contig.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return self.implementation.astype(space, dtype) + impl = self.implementation + if isinstance(impl, scalar.Scalar): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -471,9 +482,9 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(self.get_shape(), self.get_dtype()) - loop.byteswap(self.implementation, res.implementation) - return res + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + loop.byteswap(self.implementation, w_res.implementation) + return w_res @unwrap_spec(mode=str) def descr_choose(self, space, w_choices, w_out=None, mode='raise'): @@ -564,7 +575,7 @@ if space.is_none(w_out): if self.get_dtype().is_bool_type(): #numpy promotes bool.round() to float16. Go figure. 
- w_out = W_NDimArray.from_shape(self.get_shape(), + w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) else: w_out = None @@ -578,6 +589,8 @@ else: calc_dtype = out.get_dtype() + if decimals == 0: + out = out.descr_view(space,space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -619,9 +632,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if w_type is not None: - raise OperationError(space.w_NotImplementedError, space.wrap( - "view(... type=) not implemented yet")) + if not w_type and w_dtype: + try: + if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + w_type = w_dtype + w_dtype = None + except (OperationError, TypeError): + pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -651,8 +668,9 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - return W_NDimArray(impl.get_view(self, dtype, new_shape)) - + v = impl.get_view(self, dtype, new_shape) + w_ret = wrap_impl(space, w_type, self, v) + return w_ret # --------------------- operations ---------------------------- @@ -760,9 +778,9 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(out_shape, dtype) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, result, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) @unwrap_spec(w_axis = WrappedDefault(None)) @@ -884,14 +902,20 @@ isfortran = space.getitem(w_state, space.wrap(3)) storage = space.getitem(w_state, space.wrap(4)) - self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + self.implementation = W_NDimArray.from_shape_and_storage(space, + [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation + def descr___array_finalize__(self, space, w_obj): + pass - at unwrap_spec(offset=int) + at unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, w_order=None): + offset=0, w_strides=None, order='C'): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.support import calc_strides if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_order) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) @@ -900,10 +924,19 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype) - return W_NDimArray.from_shape(shape, dtype) + if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): + return W_NDimArray.from_shape(space, shape, dtype, order) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = ConcreteArray(shape, dtype.base, order, strides, + backstrides) + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + 
space.call_function(space.getattr(w_ret, + space.wrap('__array_finalize__')), w_subtype) + return w_ret @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. @@ -912,9 +945,17 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + if w_subtype: + if not space.isinstance_w(w_subtype, space.w_type): + raise OperationError(space.w_ValueError, space.wrap( + "subtype must be a subtype of ndarray, not a class instance")) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + 'C', False, w_subtype) + else: + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", @@ -1042,6 +1083,7 @@ W_NDimArray.fdel___pypy_data__), __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), + __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) @@ -1094,12 +1136,12 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape - arr = W_NDimArray.from_shape(shape, dtype, order=order) - arr_iter = arr.create_iter() + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) arr_iter.next() - return arr + return w_arr @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): @@ -1109,7 +1151,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order)) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1119,10 +1161,10 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - arr.fill(one) - return space.wrap(arr) + w_arr.fill(one) + return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -50,7 +50,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([num_items], dtype=dtype) + a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) ai = a.create_iter() for val in items: ai.setitem(val) @@ -71,7 +71,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([count], dtype=dtype) + a = 
W_NDimArray.from_shape(space, [count], dtype=dtype) loop.fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -181,7 +181,8 @@ temp_shape = obj_shape[:axis] + obj_shape[axis + 1:] if out: dtype = out.get_dtype() - temp = W_NDimArray.from_shape(temp_shape, dtype) + temp = W_NDimArray.from_shape(space, temp_shape, dtype, + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -207,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -216,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape([obj.get_size()], dtype) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out @@ -295,7 +296,7 @@ return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - return loop.call1(shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) @@ -370,7 +371,7 @@ return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - return loop.call2(new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) @@ -450,7 +451,7 @@ return dt2 return dt1 return dt2 - else: + else: # increase to the next signed type dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] @@ -537,7 +538,13 @@ return current_guess if current_guess is complex_type: return complex_type - return interp_dtype.get_dtype_cache(space).w_float64dtype + if space.isinstance_w(w_obj, space.w_float): + return float_type + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype + raise operationerrfmt(space.w_NotImplementedError, + 'unable to create dtype from objects, ' '"%T" instance not supported', + w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -58,11 +58,11 @@ def __init__(self, name): self.name = name - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(arr.start + ofs, arr.get_strides(), + return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), arr.get_backstrides(), arr.shape, arr, orig_arr, subdtype) @@ -81,13 +81,13 @@ assert s >= 0 return shape[:] + old_shape[s:] - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation shape = self.extend_shape(arr.shape) r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), self.l) _, start, strides, backstrides = r - return W_NDimArray.new_slice(start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], 
backstrides[:], shape[:], arr, orig_arr) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,9 +19,34 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) -def call2(shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): +def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): + # handle array_priority + # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: + # 1. if __array_priorities__ are equal and one is an ndarray and the + # other is a subtype, flip the order + # 2. elif rhs.__array_priority__ is higher, flip the order + # Now return the subtype of the first one + + w_ndarray = space.gettypefor(W_NDimArray) + lhs_type = space.type(w_lhs) + rhs_type = space.type(w_rhs) + lhs_for_subtype = w_lhs + rhs_for_subtype = w_rhs + #it may be something like a FlatIter, which is not an ndarray + if not space.is_true(space.issubtype(lhs_type, w_ndarray)): + lhs_type = space.type(w_lhs.base) + lhs_for_subtype = w_lhs.base + if not space.is_true(space.issubtype(rhs_type, w_ndarray)): + rhs_type = space.type(w_rhs.base) + rhs_for_subtype = w_rhs.base + if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): + lhs_for_subtype = rhs_for_subtype + + # TODO handle __array_priorities__ and maybe flip the order + if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, + w_instance=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -48,9 +73,9 @@ reds = ['shape', 'w_obj', 'out', 'obj_iter', 'out_iter']) -def call1(shape, func, calc_dtype, res_dtype, w_obj, out): +def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) @@ -437,12 +462,12 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape([1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - res_str.implementation.get_storage_as_int(space)) + w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): - res_str.implementation.setitem(0, iter.getitem()) + w_res_str.implementation.setitem(0, iter.getitem()) for i in range(itemsize): builder.append(res_str_casted[i]) iter.next() diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -2,7 +2,7 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, - FunctionCall, FakeSpace) + FunctionCall, FakeSpace, W_NDimArray) class TestCompiler(object): @@ -84,6 +84,7 @@ assert interp.code.statements[0] == Assignment( 'a', Operator(Variable('b'), "+", FloatConstant(3))) + class TestRunner(object): def run(self, code): interp = numpy_compile(code) @@ -290,4 +291,32 @@ ''') assert interp.results[0].real == 0 assert interp.results[0].imag == 1 - + + def 
test_view_none(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = None + c = view(a, b) + c -> 0 + ''') + assert interp.results[0].value == 1 + + def test_view_ndarray(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = ndarray + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + + def test_view_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -976,3 +976,16 @@ assert a[0] == 1 assert (a + a)[1] == 4 +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpypy import array + class Polynomial(object): + pass + try: + a = array(Polynomial()) + assert a.shape == () + except NotImplementedError, e: + if e.message.find('unable to create dtype from objects')>=0: + skip('creating ojbect dtype not supported yet') + + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,8 +24,8 @@ def get_size(self): return 1 -def create_slice(a, chunks): - return Chunks(chunks).apply(W_NDimArray(a)).implementation +def create_slice(space, a, chunks): + return Chunks(chunks).apply(space, W_NDimArray(a)).implementation def create_array(*args, **kwargs): @@ -46,100 +46,100 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] - a = create_array([1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 
1] assert s.backstrides == [12, 2] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -207,7 +207,8 @@ raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) # dtypes = get_dtype_cache(self.space) - w_array = W_NDimArray.from_shape_and_storage([2, 2], storage, dtypes.w_int8dtype) + w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2], + storage, dtypes.w_int8dtype) def get(i, j): return w_array.getitem(self.space, [i, j]).value assert get(0, 0) == 0 @@ -1442,7 +1443,7 @@ assert x.view('int8').shape == (10, 3) def 
test_ndarray_view_empty(self): - from numpypy import array, int8, int16, dtype + from numpypy import array, int8, int16 x = array([], dtype=[('a', int8), ('b', int8)]) y = x.view(dtype=int16) @@ -2876,6 +2877,12 @@ assert y[0, 1] == 2 y[0, 1] = 42 assert x[1] == 42 + class C(ndarray): + pass + z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C) + assert isinstance(z, C) + assert z.shape == (4, 1) + assert z[1, 0] == 42 def test___pypy_data__(self): from numpypy import array @@ -2890,7 +2897,7 @@ class AppTestLongDoubleDtypes(BaseNumpyAppTest): def setup_class(cls): from pypy.module.micronumpy import Module - print dir(Module.interpleveldefs) + #print dir(Module.interpleveldefs) if not Module.interpleveldefs.get('longfloat', None): py.test.skip('no longdouble types yet') BaseNumpyAppTest.setup_class.im_func(cls) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -0,0 +1,223 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestSupport(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_NoNew = cls.space.appexec([], '''(): + from numpypy import ndarray + class NoNew(ndarray): + def __new__(cls, subtype): + raise ValueError('should not call __new__') + def __array_finalize__(self, obj): + + self.called_finalize = True + return NoNew ''') + cls.w_SubType = cls.space.appexec([], '''(): + from numpypy import ndarray, asarray + class SubType(ndarray): + def __new__(obj, input_array): + obj = asarray(input_array).view(obj) + obj.called_new = True + return obj + def __array_finalize__(self, obj): + self.called_finalize = True + return SubType ''') + + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + a = a.reshape(1, 4) + b = a.reshape(4, 1) + assert isinstance(b, C) + #make sure __new__ was not called + assert not getattr(b, 'id', None) + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 + + def test_subtype_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + assert isinstance(b, matrix) + assert (b == a).all() + + + def test_finalize(self): + #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray + import numpypy as np + class InfoArray(np.ndarray): + def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + strides=None, order='C', info=None): + obj = np.ndarray.__new__(subtype, shape, dtype, buffer, + offset, strides, order) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + print 'finalize with None' + return + # printing the object itself will crash the test + print 'finalize with something',type(obj) + self.info = getattr(obj, 'info', None) + obj = InfoArray(shape=(3,)) + assert isinstance(obj, InfoArray) + assert obj.info is None + obj = InfoArray(shape=(3,), info='information') + 
assert obj.info == 'information' + v = obj[1:] + assert isinstance(v, InfoArray) + assert v.base is obj + assert v.info == 'information' + arr = np.arange(10) + cast_arr = arr.view(InfoArray) + assert isinstance(cast_arr, InfoArray) + assert cast_arr.base is arr + assert cast_arr.info is None + + def test_sub_where(self): + from numpypy import where, ones, zeros, array + a = array([1, 2, 3, 0, -3]) + v = a.view(self.NoNew) + b = where(array(v) > 0, ones(5), zeros(5)) + assert (b == [1, 1, 1, 0, 0]).all() + # where returns an ndarray irregardless of the subtype of v + assert not isinstance(b, self.NoNew) + + def test_sub_repeat(self): + from numpypy import repeat, array + a = self.SubType(array([[1, 2], [3, 4]])) + b = repeat(a, 3) + assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() + assert isinstance(b, self.SubType) + + def test_sub_flatiter(self): + from numpypy import array + a = array(range(9)).reshape(3, 3).view(self.NoNew) + c = array(range(9)).reshape(3, 3) + assert isinstance(a.flat[:] + a.flat[:], self.NoNew) + assert isinstance(a.flat[:] + c.flat[:], self.NoNew) + assert isinstance(c.flat[:] + a.flat[:], self.NoNew) + assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) + + def test_sub_getitem_filter(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + c = b[array([False, True, False, True, False])] + assert c.shape == (2,) + assert (c == [1, 3]).all() + assert isinstance(c, self.SubType) + assert b.called_new + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_getitem_array_int(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + assert b.called_new + c = b[array([3, 2, 1, 4])] + assert (c == [3, 2, 1, 4]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_round(self): + from numpypy import array + a = array(range(10), dtype=float).view(self.NoNew) + # numpy compatibility + b = a.round(decimals=0) + assert isinstance(b, self.NoNew) + b = a.round(decimals=1) + assert not isinstance(b, self.NoNew) + b = a.round(decimals=-1) + assert not isinstance(b, self.NoNew) + + def test_sub_dot(self): + # the returned type is that of the first argument + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = array(range(12)).reshape(4,3).view(self.SubType) + d = c.dot(a) + assert isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert d.called_finalize + d = a.dot(c) + assert not isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert not getattr(d, 'called_finalize', False) + + def test_sub_reduce(self): + # i.e. sum, max + # test for out as well + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = b.sum(axis=0) + assert (c == [12, 15, 18, 21]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + d = array(range(4)) + c = b.sum(axis=0, out=d) + assert c is d + assert not isinstance(c, self.SubType) + d = array(range(4)).view(self.NoNew) + c = b.sum(axis=0, out=d) + assert c is d + assert isinstance(c, self.NoNew) + + def test_sub_call2(self): + # c + a vs. a + c, what about array priority? 
+ from numpypy import array + a = array(range(12)).view(self.NoNew) + b = self.SubType(range(12)) + c = b + a + assert isinstance(c, self.SubType) + c = a + b + assert isinstance(c, self.NoNew) + d = range(12) + e = a - d + assert isinstance(e, self.NoNew) + + def test_sub_call1(self): + from numpypy import array, sqrt + a = array(range(12)).view(self.NoNew) + b = sqrt(a) + assert b.called_finalize == True + + def test_sub_astype(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.astype(float) + assert b.called_finalize == True + + def test_sub_reshape(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.reshape(3, 4) + assert b.called_finalize == True + diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -184,14 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 - def test_statvfs(self): - st = self.posix.statvfs(".") - assert isinstance(st, self.posix.statvfs_result) - for field in [ - 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', - 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', - ]: - assert hasattr(st, field) + if hasattr(__import__(os.name), "statvfs"): + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) def test_pickle(self): import pickle, os diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,12 +125,10 @@ self.llbox = llbox def descr_getint(self, space): - try: - value = jit_hooks.box_getint(self.llbox) - except NotImplementedError: + if not jit_hooks.box_isint(self.llbox): raise OperationError(space.w_NotImplementedError, space.wrap("Box has no int value")) - return space.wrap(value) + return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) def descr_new_box(space, w_tp, no): diff --git a/rpython/jit/tool/test/f.pypylog.bz2 b/rpython/jit/tool/test/f.pypylog.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..a982e459b1daa33547576733ccc0b560f99a3f79 GIT binary patch [cut] diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,7 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color + split_one_loop, postprocess, main, get_gradient_color, guard_number def test_gradient_color(): @@ -30,6 +30,20 @@ loops = splitloops(data) assert len(loops) == 2 + def test_no_of_loops_hexguards(self): + data = [preparse(""" + # Loop 0 : loop with 39 ops + debug_merge_point('', 0) + guard_class(p4, 141310752, descr=) [p0, p1] + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), preparse(""" + # Loop 1 : loop with 46 ops + p21 = getfield_gc(p4, descr=) + """)] + loops = splitloops(data) + assert len(loops) == 2 + def test_split_one_loop(self): real_loops = [FinalBlock(preparse(""" p21 = getfield_gc(p4, descr=) @@ -50,12 +64,42 @@ assert loop.left.content == '' assert loop.right.content == 'extra' + def 
test_split_one_loop_hexguards(self): + real_loops = [FinalBlock(preparse(""" + p21 = getfield_gc(p4, descr=) + guard_class(p4, 141310752, descr=) [p0, p1] + """), None), FinalBlock(preparse(""" + p60 = getfield_gc(p4, descr=) + guard_nonnull(p60, descr=) [p0, p1] + """), None)] + real_loops[0].loop_no = 0 + real_loops[1].loop_no = 1 + allloops = real_loops[:] + split_one_loop(real_loops, 'Guard0x10abcdef0', 'extra', 1, guard_number(("0x10abcdef0", "0x")), allloops) + loop = real_loops[1] + assert isinstance(loop, Block) + assert loop.content.endswith('p1]') + loop.left = allloops[loop.left] + loop.right = allloops[loop.right] + assert loop.left.content == '' + assert loop.right.content == 'extra' + def test_postparse(self): real_loops = [FinalBlock("debug_merge_point(' #40 POP_TOP', 0)", None)] postprocess(real_loops, real_loops[:], {}) assert real_loops[0].header.startswith("_runCallbacks, file '/tmp/x/twisted-trunk/twisted/internet/defer.py', line 357") + def test_postparse_new(self): + real_loops = [FinalBlock("debug_merge_point(0, 0, ' #351 LOAD_FAST')", None)] + postprocess(real_loops, real_loops[:], {}) + assert real_loops[0].header.startswith("_optimize_charset. file '/usr/local/Cellar/pypy/2.0-beta2/lib-python/2.7/sre_compile.py'. line 207") + def test_load_actual(self): fname = py.path.local(__file__).join('..', 'data.log.bz2') main(str(fname), False, view=False) # assert did not explode + + def test_load_actual_f(self): + fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') + main(str(fname), False, view=False) + # assert did not explode diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -56,6 +56,18 @@ BOX_COLOR = (128, 0, 96) +GUARDNO_RE = "((0x)?[\da-f]+)" +def guard_number(guardno_match): + if (len(guardno_match) == 1 # ("12354",) + or guardno_match[1] != "0x" # ("12345", None) + ): + return int(guardno_match[0]) + else: # ("0x12ef", "0x") + return int(guardno_match[0], 16) + +def guard_number_string(guardno_match): + return guardno_match[0] # its always the first group + class BasicBlock(object): counter = 0 startlineno = 0 @@ -85,13 +97,15 @@ def set_content(self, content): self._content = content - groups = re.findall('Guard(\d+)', content) + groups = re.findall('Guard' + GUARDNO_RE, content) if not groups: self.first_guard = -1 self.last_guard = -1 else: - self.first_guard = int(groups[0]) - self.last_guard = int(groups[-1]) + # guards can be out of order nowadays + groups = sorted(groups) + self.first_guard = guard_number(groups[0]) + self.last_guard = guard_number(groups[-1]) content = property(get_content, set_content) @@ -197,11 +211,11 @@ _loop.loop_no = no allloops.append(_loop) else: - m = re.search("bridge out of Guard (\d+)", firstline) + m = re.search("bridge out of Guard " + GUARDNO_RE, firstline) assert m - guard_s = 'Guard' + m.group(1) + guard_s = 'Guard' + guard_number_string(m.groups()) split_one_loop(real_loops, guard_s, loop, counter, - int(m.group(1)), allloops) + guard_number(m.groups()), allloops) counter += loop.count("\n") + 2 return real_loops, allloops @@ -211,7 +225,7 @@ memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\('( (.*?))'", loop.content) + m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) if m is None: name = '?' loop.key = '?' 
@@ -236,7 +250,7 @@ content = loop.content loop.content = "Logfile at %d\n" % loop.startlineno + content loop.postprocess(loops, memo, counts) - + def postprocess(loops, allloops, counts): for loop in allloops: if isinstance(loop, Block): diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -111,6 +111,11 @@ from rpython.jit.metainterp.history import Const return isinstance(_cast_to_box(llbox), Const) + at register_helper(annmodel.SomeBool()) +def box_isint(llbox): + from rpython.jit.metainterp.history import INT + return _cast_to_box(llbox).type == INT + # ------------------------- stats interface --------------------------- @register_helper(annmodel.SomeBool()) diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -46,6 +46,26 @@ data = getllimpl(os.getlogin)() assert data == expected +def test_statvfs(): + if not hasattr(os, 'statvfs'): + py.test.skip('posix specific function') + try: + expected = os.statvfs('.') + except OSError, e: + py.test.skip("the underlying os.statvfs() failed: %s" % e) + data = getllimpl(os.statvfs)('.') + assert data == expected + +def test_fstatvfs(): + if not hasattr(os, 'fstatvfs'): + py.test.skip('posix specific function') + try: + expected = os.fstatvfs(0) + except OSError, e: + py.test.skip("the underlying os.fstatvfs() failed: %s" % e) + data = getllimpl(os.fstatvfs)(0) + assert data == expected + def test_utimes(): if os.name != 'nt': py.test.skip('Windows specific feature') From noreply at buildbot.pypy.org Sat Jul 27 10:08:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 10:08:31 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: I *think* that by now stm_pointer_equal() can be done more simply, Message-ID: <20130727080831.ADB151C142B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r462:d4e3aac8c458 Date: 2013-07-27 10:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/d4e3aac8c458/ Log: I *think* that by now stm_pointer_equal() can be done more simply, without needing to get the stm_id(). 
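[Editor's note, not part of the commit email above.] The patch below drops the
stm_id()-based comparison: two non-NULL pointers now count as equal when, after
following their h_original link (skipped for objects carrying
GCFLAG_PREBUILT_ORIGINAL), they resolve to the same object. A minimal Python
sketch of that rule, reusing the field and flag names from the patch; the
Python modelling itself is only an illustration, not code from the repository:

    GCFLAG_PREBUILT_ORIGINAL = 0x1     # placeholder bit; the real value lives in stmgc

    class Obj(object):
        def __init__(self, h_tid=0, h_original=None):
            self.h_tid = h_tid                # flag word
            self.h_original = h_original      # None, or the object's original copy

    def pointer_equal(p1, p2):
        # mirrors the C logic: resolution only happens when both are non-NULL
        def resolve(p):
            if p.h_original is not None and not (p.h_tid & GCFLAG_PREBUILT_ORIGINAL):
                return p.h_original
            return p
        if p1 is not None and p2 is not None:
            p1 = resolve(p1)
            p2 = resolve(p2)
        return p1 is p2

Under this rule a copy produced by stm_write_barrier() compares equal to the
object it was copied from, which is what the truth table in the rewritten
test_pointer_equal() below asserts.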
diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -131,17 +131,16 @@ _Bool stm_pointer_equal(gcptr p1, gcptr p2) { - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* if p1 or p2 is NULL (but not both, because they are different - pointers), then return 0 */ - if (p1 == NULL || p2 == NULL) - return 0; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); + if (p1 != NULL && p2 != NULL) { + /* resolve h_original, but only if !PREBUILT_ORIGINAL */ + if (p1->h_original && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p1 = (gcptr)p1->h_original; + } + if (p2->h_original && !(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p2 = (gcptr)p2->h_original; + } + } + return (p1 == p2); } /************************************************************/ diff --git a/c4/test/test_extra.py b/c4/test/test_extra.py --- a/c4/test/test_extra.py +++ b/c4/test/test_extra.py @@ -104,13 +104,28 @@ assert ffi.string(c).endswith("ei424242ee") def test_pointer_equal(): - p = palloc(HDR + WORD) - assert lib.stm_pointer_equal(p, p) - assert not lib.stm_pointer_equal(p, ffi.NULL) - assert not lib.stm_pointer_equal(ffi.NULL, p) - assert lib.stm_pointer_equal(ffi.NULL, ffi.NULL) - q = lib.stm_write_barrier(p) - assert q != p - assert lib.stm_pointer_equal(p, q) - assert lib.stm_pointer_equal(q, q) - assert lib.stm_pointer_equal(q, p) + p1 = palloc(HDR + WORD) + p2 = palloc(HDR + WORD) + p3 = oalloc(HDR + WORD) + p4 = nalloc(HDR + WORD) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + p1b = lib.stm_write_barrier(p1) + p2b = lib.stm_write_barrier(p2) + p3b = lib.stm_write_barrier(p3) + p4b = lib.stm_write_barrier(p4) + # + got = [] + for qa in [ffi.NULL, p1, p1b, p2, p2b, p3, p3b, p4, p4b]: + for qb in [ffi.NULL, p1, p1b, p2, p2b, p3, p3b, p4, p4b]: + got.append(lib.stm_pointer_equal(qa, qb)) + # + assert got == [1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 1, 1] From noreply at buildbot.pypy.org Sat Jul 27 11:28:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 11:28:17 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix test_llinterp Message-ID: <20130727092817.B96441C1055@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65701:07542962faa0 Date: 2013-07-26 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/07542962faa0/ Log: fix test_llinterp diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -554,6 +554,9 @@ def op_jit_record_known_class(self, *args): pass + def op_jit_conditional_call(self, *args): + raise NotImplementedError("should not be called while not jitted") + def op_get_exception_addr(self, *args): raise NotImplementedError From noreply at buildbot.pypy.org Sat Jul 27 11:28:19 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 11:28:19 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: "Fix" this test. Message-ID: <20130727092819.0EDF31C11A9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65702:ccb3ce114fc2 Date: 2013-07-27 11:27 +0200 http://bitbucket.org/pypy/pypy/changeset/ccb3ce114fc2/ Log: "Fix" this test. 
diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py --- a/rpython/translator/backendopt/test/test_all.py +++ b/rpython/translator/backendopt/test/test_all.py @@ -48,6 +48,8 @@ def translateopt(self, func, sig, **optflags): t = TranslationContext() + opts = {'translation.list_comprehension_operations': True} + t.config.set(**opts) t.buildannotator().build_types(func, sig) t.buildrtyper(type_system=self.type_system).specialize() if option.view: @@ -61,7 +63,7 @@ assert big() == 83 t = self.translateopt(big, [], inline_threshold=HUGE_THRESHOLD, - mallocs=True) + mallocs=True) big_graph = graphof(t, big) self.check_malloc_removed(big_graph) @@ -128,7 +130,7 @@ return res def g(x): - return s(100) + s(1) + x + return s(100) + s(1) + x def idempotent(n1, n2): c = [i for i in range(n2)] @@ -301,4 +303,4 @@ class TestOOType(BaseTester): type_system = 'ootype' check_malloc_removed = OOTypeMallocRemovalTest.check_malloc_removed - + diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py --- a/rpython/translator/backendopt/test/test_malloc.py +++ b/rpython/translator/backendopt/test/test_malloc.py @@ -365,7 +365,7 @@ def test_classattr_as_defaults(self): class Bar: foo = 41 - + def fn(): x = Bar() x.foo += 1 From noreply at buildbot.pypy.org Sat Jul 27 12:55:02 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 12:55:02 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Re-add _formatter_parser() and _formatter_field_name_split(). Message-ID: <20130727105502.4D4B01C11A9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65703:26fbdbbb7e48 Date: 2013-07-27 12:38 +0200 http://bitbucket.org/pypy/pypy/changeset/26fbdbbb7e48/ Log: Re-add _formatter_parser() and _formatter_field_name_split(). 
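[Editor's note, not part of the commit email above.] _formatter_parser() and
_formatter_field_name_split() are the private str hooks that string.Formatter
relies on; the diff below reattaches them to the newformat template formatter
as real methods of W_BytesObject and W_UnicodeObject. As a rough illustration
of the expected behaviour (this is how CPython 2.7 behaves, stated here as an
assumption rather than taken from the patch):

    parts = list("a {0.x} b"._formatter_parser())
    # each item is (literal_text, field_name, format_spec, conversion), e.g.
    # [('a ', '0.x', '', None), (' b', None, None, None)]

    first, rest = "0.x[1]"._formatter_field_name_split()
    # 'first' is the leading name or index (0 here); iterating 'rest' yields
    # (is_attribute, name) pairs: [(True, 'x'), (False, 1)]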
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -217,6 +217,16 @@ w_u = space.call_function(space.w_unicode, self) return space.call_method(w_u, "join", w_list) + def descr_formatter_parser(self, space): + from pypy.objspace.std.newformat import str_template_formatter + tformat = str_template_formatter(space, space.str_w(self)) + return tformat.formatter_parser() + + def descr_formatter_field_name_split(self, space): + from pypy.objspace.std.newformat import str_template_formatter + tformat = str_template_formatter(space, space.str_w(self)) + return tformat.formatter_field_name_split() + def _create_list_from_string(value): # need this helper function to allow the jit to look inside and inline @@ -324,6 +334,9 @@ __mod__ = interp2app(W_BytesObject.descr_mod), __buffer__ = interp2app(W_BytesObject.descr_buffer), __getnewargs__ = interp2app(W_BytesObject.descr_getnewargs), + _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), + _formatter_field_name_split = + interp2app(W_BytesObject.descr_formatter_field_name_split), ) @@ -372,18 +385,3 @@ buf.append(quote) return buf.build() - - - -#str_formatter_parser = SMM('_formatter_parser', 1) -#str_formatter_field_name_split = SMM('_formatter_field_name_split', 1) -# -#def str_formatter_parser__ANY(space, w_str): -# from pypy.objspace.std.newformat import str_template_formatter -# tformat = str_template_formatter(space, space.str_w(w_str)) -# return tformat.formatter_parser() -# -#def str_formatter_field_name_split__ANY(space, w_str): -# from pypy.objspace.std.newformat import str_template_formatter -# tformat = str_template_formatter(space, space.str_w(w_str)) -# return tformat.formatter_field_name_split() diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -195,6 +195,16 @@ return 0 return 1 + def descr_formatter_parser(self, space): + from pypy.objspace.std.newformat import unicode_template_formatter + tformat = unicode_template_formatter(space, space.unicode_w(self)) + return tformat.formatter_parser() + + def descr_formatter_field_name_split(self, space): + from pypy.objspace.std.newformat import unicode_template_formatter + tformat = unicode_template_formatter(space, space.unicode_w(self)) + return tformat.formatter_field_name_split() + def wrapunicode(space, uni): return W_UnicodeObject(uni) @@ -216,16 +226,6 @@ assert False, "unreachable" -#def unicode_formatter_parser__ANY(space, w_unicode): -# from pypy.objspace.std.newformat import unicode_template_formatter -# tformat = unicode_template_formatter(space, space.unicode_w(w_unicode)) -# return tformat.formatter_parser() -# -#def unicode_formatter_field_name_split__ANY(space, w_unicode): -# from pypy.objspace.std.newformat import unicode_template_formatter -# tformat = unicode_template_formatter(space, space.unicode_w(w_unicode)) -# return tformat.formatter_field_name_split() - # stuff imported from bytesobject for interoperability @@ -459,6 +459,9 @@ __format__ = interp2app(W_UnicodeObject.descr__format__), __mod__ = interp2app(W_UnicodeObject.descr_mod), __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs), + _formatter_parser = interp2app(W_UnicodeObject.descr_formatter_parser), + _formatter_field_name_split = + interp2app(W_UnicodeObject.descr_formatter_field_name_split), ) unitypedef = unicode_typedef From noreply at 
buildbot.pypy.org Sat Jul 27 12:55:03 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 12:55:03 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix translation tentatively. Message-ID: <20130727105503.9DD8C1C11A9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65704:fdd90d4b1588 Date: 2013-07-27 12:52 +0200 http://bitbucket.org/pypy/pypy/changeset/fdd90d4b1588/ Log: Fix translation tentatively. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -20,6 +20,9 @@ from rpython.rlib.rstring import StringBuilder +def _make_data(s): + return [s[i] for i in range(len(s))] + class W_BytearrayObject(W_Object, StringMethods): def __init__(w_self, data): w_self.data = data @@ -29,7 +32,7 @@ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): - return W_BytearrayObject(list(value)) + return W_BytearrayObject(_make_data(value)) def _len(self): return len(self.data) @@ -48,7 +51,7 @@ _builder = StringBuilder def _newlist_unwrapped(self, space, res): - return space.newlist([W_BytearrayObject(list(i)) for i in res]) + return space.newlist([W_BytearrayObject(_make_data(i)) for i in res]) def _isupper(self, ch): return ch.isupper() From noreply at buildbot.pypy.org Sat Jul 27 12:59:45 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 12:59:45 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix another list(). Message-ID: <20130727105945.5BD7A1C11A9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65705:cfbc930a7b52 Date: 2013-07-27 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/cfbc930a7b52/ Log: Fix another list(). diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -182,8 +182,8 @@ return space.add(self_as_unicode, w_other) elif space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy - from .bytearrayobject import W_BytearrayObject - self_as_bytearray = W_BytearrayObject(list(self._value)) + from .bytearrayobject import W_BytearrayObject, _make_data + self_as_bytearray = W_BytearrayObject(_make_data(self._value)) return space.add(self_as_bytearray, w_other) return StringMethods.descr_add(self, space, w_other) From noreply at buildbot.pypy.org Sat Jul 27 13:48:31 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 13:48:31 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add myself Message-ID: <20130727114831.1953A1C0113@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5009:2f1f5ea3bcc6 Date: 2013-07-27 13:48 +0200 http://bitbucket.org/pypy/extradoc/changeset/2f1f5ea3bcc6/ Log: add myself diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -19,6 +19,7 @@ Richard Emslie 25/8-2/9 some hotel Remi Meier 24/8-1/9 ? Marko Bencun 24/8-1/9 ? 
+Maciej Fijalkowski 25/8-1/9 private ==================== ============== ======================= From noreply at buildbot.pypy.org Sat Jul 27 14:19:53 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 14:19:53 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add app-level unicode.isdecimal and unicode.isnumeric. Message-ID: <20130727121953.E43731C1055@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65706:5e677015dc17 Date: 2013-07-27 14:17 +0200 http://bitbucket.org/pypy/pypy/changeset/5e677015dc17/ Log: Add app-level unicode.isdecimal and unicode.isnumeric. diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -888,3 +888,15 @@ assert b == u'hello \u1234' assert u'%s' % S(u'mar\xe7') == u'mar\xe7' + + def test_isdecimal(self): + assert u'0'.isdecimal() + assert not u''.isdecimal() + assert not u'a'.isdecimal() + assert not u'\u2460'.isdecimal() # CIRCLED DIGIT ONE + + def test_isnumeric(self): + assert u'0'.isnumeric() + assert not u''.isnumeric() + assert not u'a'.isnumeric() + assert u'\u2460'.isnumeric() # CIRCLED DIGIT ONE diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -96,6 +96,9 @@ def _islower(self, ch): return unicodedb.islower(ord(ch)) + def _isnumeric(self, ch): + return unicodedb.isnumeric(ord(ch)) + def _istitle(self, ch): return unicodedb.istitle(ord(ch)) @@ -111,6 +114,9 @@ def _isdigit(self, ch): return unicodedb.isdigit(ord(ch)) + def _isdecimal(self, ch): + return unicodedb.isdecimal(ord(ch)) + def _iscased(self, ch): return unicodedb.iscased(ord(ch)) @@ -205,6 +211,12 @@ tformat = unicode_template_formatter(space, space.unicode_w(self)) return tformat.formatter_field_name_split() + def descr_isdecimal(self, space): + return self._is_generic(space, '_isdecimal') + + def descr_isnumeric(self, space): + return self._is_generic(space, '_isnumeric') + def wrapunicode(space, uni): return W_UnicodeObject(uni) @@ -429,8 +441,10 @@ rindex = interp2app(W_UnicodeObject.descr_rindex), isalnum = interp2app(W_UnicodeObject.descr_isalnum), isalpha = interp2app(W_UnicodeObject.descr_isalpha), + isdecimal = interp2app(W_UnicodeObject.descr_isdecimal), isdigit = interp2app(W_UnicodeObject.descr_isdigit), islower = interp2app(W_UnicodeObject.descr_islower), + isnumeric = interp2app(W_UnicodeObject.descr_isnumeric), isspace = interp2app(W_UnicodeObject.descr_isspace), istitle = interp2app(W_UnicodeObject.descr_istitle), isupper = interp2app(W_UnicodeObject.descr_isupper), From noreply at buildbot.pypy.org Sat Jul 27 14:20:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 14:20:14 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: add 'I would like to share' in my location info Message-ID: <20130727122014.598731C1055@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5010:7daea3eaeb9b Date: 2013-07-27 14:20 +0200 http://bitbucket.org/pypy/extradoc/changeset/7daea3eaeb9b/ Log: add 'I would like to share' in my location info diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -15,7 +15,7 @@ Romain Guillebert ? ? Laurence Tratt lives there Edd Barrett ? ? -Armin Rigo ? ? 
+Armin Rigo ? hotel I'd like to share Richard Emslie 25/8-2/9 some hotel Remi Meier 24/8-1/9 ? Marko Bencun 24/8-1/9 ? From noreply at buildbot.pypy.org Sat Jul 27 14:24:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 27 Jul 2013 14:24:42 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: small cleanup Message-ID: <20130727122442.33B061C1055@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65707:c2d63fec53bb Date: 2013-07-26 19:02 +0100 http://bitbucket.org/pypy/pypy/changeset/c2d63fec53bb/ Log: small cleanup diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -233,7 +233,7 @@ assert self.ll_to_string(res) == 'hello world' def test_os_lseek(self): - self._skip_llinterpreter("os.lseek", skipOO=False) + self._skip_llinterpreter("os.lseek") tmpfile = str(udir.udir.join("os_lseek_test")) f = file(tmpfile, 'w') f.write('0123456789') @@ -400,7 +400,6 @@ assert self.class_name(res) == 'B' def test_os_path_join(self): - self._skip_llinterpreter("os path oofakeimpl", skipLL=False) def fn(a, b): return os.path.join(a, b) res = self.ll_to_string(self.interpret(fn, ['a', 'b'])) diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -35,12 +35,8 @@ def is_of_type(self, x, type_): return type(x) is type_ - def _skip_llinterpreter(self, reason, skipLL=True, skipOO=True): - if skipLL and self.type_system == 'lltype': - py.test.skip("lltypesystem doesn't support %s, yet" % reason) - if skipOO and self.type_system == 'ootype': - py.test.skip("ootypesystem doesn't support %s, yet" % reason) - + def _skip_llinterpreter(self, reason): + py.test.skip("lltypesystem doesn't support %s, yet" % reason) def ll_to_string(self, s): if not s: From noreply at buildbot.pypy.org Sat Jul 27 14:24:43 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 27 Jul 2013 14:24:43 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: Kill USE_SHORT_FLOAT_REPR (make it always True) Message-ID: <20130727122443.A35CB1C1055@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65708:7a0e4c894994 Date: 2013-07-27 13:24 +0100 http://bitbucket.org/pypy/pypy/changeset/7a0e4c894994/ Log: Kill USE_SHORT_FLOAT_REPR (make it always True) diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -57,4 +57,4 @@ return space.call_function(w_long_info, space.newtuple(info_w)) def get_float_repr_style(space): - return space.wrap("short" if rfloat.USE_SHORT_FLOAT_REPR else "legacy") + return space.wrap("short") diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -321,13 +321,6 @@ 'jit': DEFL_GC + ' extraopts jit', } -def final_check_config(config): - # XXX: this should be a real config option, but it is hard to refactor it; - # instead, we "just" patch it from here - from rpython.rlib import rfloat - if config.translation.type_system == 'ootype': - rfloat.USE_SHORT_FLOAT_REPR = False - def set_opt_level(config, level): """Apply optimization suggestions on the 'config'. The optimizations depend on the selected level and possibly on the backend. 
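[Editor's note, not part of the commit email above.] With the flag gone, float
to string conversion always takes the rdtoa ("short repr") code path, so the
repr of a float is the shortest decimal string that round-trips, and
sys.float_repr_style can only report "short". A small illustration, assuming an
interpreter built with short float repr:

    import sys
    assert sys.float_repr_style == "short"
    assert repr(2.1) == "2.1"   # the removed legacy style printed '2.1000000000000001'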
diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -9,8 +9,6 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo -USE_SHORT_FLOAT_REPR = True # XXX make it a translation option? - class CConfig: _compilation_info_ = ExternalCompilationInfo(includes=["float.h"]) @@ -61,80 +59,8 @@ raise ParseStringError("invalid literal for float(): '%s'" % s) def rstring_to_float(s): - return rstring_to_float_impl(s) - -def rstring_to_float_impl(s): - if USE_SHORT_FLOAT_REPR: - from rpython.rlib.rdtoa import strtod - return strtod(s) - sign, before_point, after_point, exponent = break_up_float(s) - if not before_point and not after_point: - raise ValueError - return parts_to_float(sign, before_point, after_point, exponent) - -register_external(rstring_to_float, [SomeString(can_be_None=False)], float, - llimpl=rstring_to_float_impl, sandboxsafe=True) - - -# float as string -> sign, beforept, afterpt, exponent -def break_up_float(s): - i = 0 - - sign = '' - before_point = '' - after_point = '' - exponent = '' - - if s[i] in '+-': - sign = s[i] - i += 1 - - while i < len(s) and s[i] in '0123456789': - before_point += s[i] - i += 1 - - if i == len(s): - return sign, before_point, after_point, exponent - - if s[i] == '.': - i += 1 - while i < len(s) and s[i] in '0123456789': - after_point += s[i] - i += 1 - - if i == len(s): - return sign, before_point, after_point, exponent - - if s[i] not in 'eE': - raise ValueError - - i += 1 - if i == len(s): - raise ValueError - - if s[i] in '-+': - exponent += s[i] - i += 1 - - if i == len(s): - raise ValueError - - while i < len(s) and s[i] in '0123456789': - exponent += s[i] - i += 1 - - if i != len(s): - raise ValueError - - return sign, before_point, after_point, exponent - -# string -> float helper - -def parts_to_float(sign, beforept, afterpt, exponent): - "NOT_RPYTHON" - if not exponent: - exponent = '0' - return float("%s%s.%se%s" % (sign, beforept, afterpt, exponent)) + from rpython.rlib.rdtoa import strtod + return strtod(s) # float -> string @@ -148,42 +74,10 @@ DIST_NAN = 2 DIST_INFINITY = 3 -# Equivalent to CPython's PyOS_double_to_string -def _formatd(x, code, precision, flags): - "NOT_RPYTHON" - if flags & DTSF_ALT: - alt = '#' - else: - alt = '' - - if code == 'r': - fmt = "%r" - else: - fmt = "%%%s.%d%s" % (alt, precision, code) - s = fmt % (x,) - - if flags & DTSF_ADD_DOT_0: - # We want float numbers to be recognizable as such, - # i.e., they should contain a decimal point or an exponent. - # However, %g may print the number as an integer; - # in such cases, we append ".0" to the string. - for c in s: - if c in '.eE': - break - else: - s += '.0' - elif code == 'r' and s.endswith('.0'): - s = s[:-2] - - return s - @objectmodel.enforceargs(float, SomeChar(), int, int) def formatd(x, code, precision, flags=0): - if USE_SHORT_FLOAT_REPR: - from rpython.rlib.rdtoa import dtoa_formatd - return dtoa_formatd(x, code, precision, flags) - else: - return _formatd(x, code, precision, flags) + from rpython.rlib.rdtoa import dtoa_formatd + return dtoa_formatd(x, code, precision, flags) def double_to_string(value, tp, precision, flags): if isfinite(value): @@ -200,12 +94,6 @@ Specify half_even=True to round half even instead. 
""" - if USE_SHORT_FLOAT_REPR: - return round_double_short_repr(value, ndigits, half_even) - else: - return round_double_fallback_repr(value, ndigits, half_even) - -def round_double_short_repr(value, ndigits, half_even): # The basic idea is very simple: convert and round the double to # a decimal string using _Py_dg_dtoa, then convert that decimal # string back to a double with _Py_dg_strtod. There's one minor @@ -290,46 +178,6 @@ return sign * rstring_to_float(strvalue) -# fallback version, to be used when correctly rounded -# binary<->decimal conversions aren't available -def round_double_fallback_repr(value, ndigits, half_even): - if ndigits >= 0: - if ndigits > 22: - # pow1 and pow2 are each safe from overflow, but - # pow1*pow2 ~= pow(10.0, ndigits) might overflow - pow1 = math.pow(10.0, ndigits - 22) - pow2 = 1e22 - else: - pow1 = math.pow(10.0, ndigits) - pow2 = 1.0 - - y = (value * pow1) * pow2 - # if y overflows, then rounded value is exactly x - if isinf(y): - return value - - else: - pow1 = math.pow(10.0, -ndigits); - pow2 = 1.0 # unused; for translation - y = value / pow1 - - if half_even: - z = round_away(y) - if math.fabs(y - z) == 0.5: - z = 2.0 * round_away(y / 2.0) - else: - if y >= 0.0: - z = math.floor(y + 0.5) - else: - z = math.ceil(y - 0.5) - if math.fabs(y - z) == 1.0: # obscure case, see the test - z = y - - if ndigits >= 0: - z = (z / pow2) / pow1 - else: - z *= pow1 - return z INFINITY = 1e200 * 1e200 NAN = abs(INFINITY / INFINITY) # bah, INF/INF gives us -NAN? diff --git a/rpython/rlib/test/test_rfloat.py b/rpython/rlib/test/test_rfloat.py --- a/rpython/rlib/test/test_rfloat.py +++ b/rpython/rlib/test/test_rfloat.py @@ -1,7 +1,6 @@ import sys, py from rpython.rlib.rfloat import float_as_rbigint_ratio -from rpython.rlib.rfloat import break_up_float from rpython.rlib.rfloat import copysign from rpython.rlib.rfloat import round_away from rpython.rlib.rfloat import round_double @@ -88,32 +87,11 @@ def test_round_half_even(): from rpython.rlib import rfloat - for func in (rfloat.round_double_short_repr, - rfloat.round_double_fallback_repr): - # 2.x behavior - assert func(2.5, 0, False) == 3.0 - # 3.x behavior - assert func(2.5, 0, True) == 2.0 - -def test_break_up_float(): - assert break_up_float('1') == ('', '1', '', '') - assert break_up_float('+1') == ('+', '1', '', '') - assert break_up_float('-1') == ('-', '1', '', '') - - assert break_up_float('.5') == ('', '', '5', '') - - assert break_up_float('1.2e3') == ('', '1', '2', '3') - assert break_up_float('1.2e+3') == ('', '1', '2', '+3') - assert break_up_float('1.2e-3') == ('', '1', '2', '-3') - - # some that will get thrown out on return: - assert break_up_float('.') == ('', '', '', '') - assert break_up_float('+') == ('+', '', '', '') - assert break_up_float('-') == ('-', '', '', '') - assert break_up_float('e1') == ('', '', '', '1') - - py.test.raises(ValueError, break_up_float, 'e') - + func = rfloat.round_double + # 2.x behavior + assert func(2.5, 0, False) == 3.0 + # 3.x behavior + assert func(2.5, 0, True) == 2.0 def test_float_as_rbigint_ratio(): for f, ratio in [ diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -85,7 +85,3 @@ llimpl = func_with_new_name(func, name) register_external(func, args, res, 'll_os_path.ll_%s' % name, llimpl=llimpl, sandboxsafe=True) - -# -------------------- strtod functions ---------------------- - -from rpython.rtyper.module import ll_strtod diff --git 
a/rpython/rtyper/module/ll_strtod.py b/rpython/rtyper/module/ll_strtod.py deleted file mode 100644 --- a/rpython/rtyper/module/ll_strtod.py +++ /dev/null @@ -1,107 +0,0 @@ - -import py -from rpython.rtyper.extfunc import BaseLazyRegistering, extdef, registering -from rpython.rlib import rfloat -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rposix -from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir -from rpython.annotator.model import SomeString - -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes = ['src/ll_strtod.h'], - include_dirs = [str(py.path.local(cdir))], - separate_module_sources = ['#include '], - export_symbols = ['LL_strtod_formatd', 'LL_strtod_parts_to_float'], - ) - -class RegisterStrtod(BaseLazyRegistering): - def __init__(self): - self.configure(CConfig) - - @registering(rfloat._formatd) - def register_formatd(self): - ll_strtod = self.llexternal('LL_strtod_formatd', - [rffi.DOUBLE, rffi.CHAR, rffi.INT], rffi.CCHARP, - sandboxsafe=True, threadsafe=False) - - # Like PyOS_double_to_string(), when PY_NO_SHORT_FLOAT_REPR is defined - def llimpl(x, code, precision, flags): - upper = False - if code == 'r': - code = 'g' - precision = 17 - elif code == 'E': - code = 'e' - upper = True - elif code == 'F': - code = 'f' - upper = True - elif code == 'G': - code = 'g' - upper = True - - res = ll_strtod(x, code, precision) - s = rffi.charp2str(res) - - if flags & rfloat.DTSF_ADD_DOT_0: - s = ensure_decimal_point(s, precision) - - # Add sign when requested - if flags & rfloat.DTSF_SIGN and s[0] != '-': - s = '+' + s - - # Convert to upper case - if upper: - s = s.upper() - - return s - - return extdef([float, lltype.Char, int, int], - SomeString(can_be_None=True), - 'll_strtod.ll_strtod_formatd', - llimpl=llimpl, sandboxsafe=True) - - @registering(rfloat.parts_to_float) - def register_parts_to_float(self): - ll_parts_to_float = self.llexternal('LL_strtod_parts_to_float', - [rffi.CCHARP] * 4, rffi.DOUBLE, - sandboxsafe=True, - threadsafe=False) - - def llimpl(sign, beforept, afterpt, exponent): - res = ll_parts_to_float(sign, beforept, afterpt, exponent) - if res == -1 and rposix.get_errno() == 42: - raise ValueError("Wrong literal for float") - return res - - tp = SomeString(can_be_None=True) - return extdef([tp, tp, tp, tp], float, - 'll_strtod.ll_strtod_parts_to_float', llimpl=llimpl, - sandboxsafe=True) - -def ensure_decimal_point(s, precision): - # make sure we have at least one character after the decimal point (and - # make sure we have a decimal point); also switch to exponential notation - # in some edge cases where the extra character would produce more - # significant digits that we really want. 
- - pos = s.find('.') - if pos >= 0: - if pos + 1 < len(s) and s[pos + 1].isdigit(): - # Nothing to do, we already have a decimal point - # and a digit after it - pass - else: - # Normally not used - s += '0' - else: - pos = s.find('e') - if pos >= 0: - # Don't add ".0" if we have an exponent - pass - else: - s += '.0' - - return s diff --git a/rpython/rtyper/module/test/test_ll_strtod.py b/rpython/rtyper/module/test/test_ll_strtod.py --- a/rpython/rtyper/module/test/test_ll_strtod.py +++ b/rpython/rtyper/module/test/test_ll_strtod.py @@ -11,24 +11,3 @@ return rfloat.formatd(y, 'g', 2, flags) assert self.ll_to_string(self.interpret(f, [3.0])) == f(3.0) - - def test_parts_to_float(self): - from rpython.rtyper.annlowlevel import hlstr - - def f(a, b, c, d): - a,b,c,d = hlstr(a), hlstr(b), hlstr(c), hlstr(d) - - return rfloat.parts_to_float(a, b, c, d) - - data = [ - (("","1","","") , 1.0), - (("-","1","","") , -1.0), - (("-","1","5","") , -1.5), - (("-","1","5","2") , -1.5e2), - (("-","1","5","+2") , -1.5e2), - (("-","1","5","-2") , -1.5e-2), - ] - - for parts, val in data: - args = [self.string_to_ll(i) for i in parts] - assert self.interpret(f, args) == val diff --git a/rpython/rtyper/test/test_rfloat.py b/rpython/rtyper/test/test_rfloat.py --- a/rpython/rtyper/test/test_rfloat.py +++ b/rpython/rtyper/test/test_rfloat.py @@ -241,21 +241,6 @@ f = compile(func, [float]) assert f(10/3.0) == '3.3333' - def test_parts_to_float(self): - from rpython.rlib.rfloat import parts_to_float, break_up_float - def f(x): - if x == 0: - s = '1.0' - else: - s = '1e-100' - sign, beforept, afterpt, expt = break_up_float(s) - return parts_to_float(sign, beforept, afterpt, expt) - res = self.interpret(f, [0]) - assert res == 1.0 - - res = self.interpret(f, [1]) - assert res == 1e-100 - def test_string_to_float(self): from rpython.rlib.rfloat import rstring_to_float def func(x): diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -264,25 +264,6 @@ assert t0 <= res <= t1 -def test_parts_to_float(): - from rpython.rlib.rfloat import parts_to_float - def fn(sign, beforept, afterpt, exponent): - return parts_to_float(sign, beforept, afterpt, exponent) - - f = compile(fn, [str, str, str, str]) - - data = [ - (("","1","","") , 1.0), - (("-","1","","") , -1.0), - (("-","1","5","") , -1.5), - (("-","1","5","2") , -1.5e2), - (("-","1","5","+2") , -1.5e2), - (("-","1","5","-2") , -1.5e-2), - ] - - for parts, val in data: - assert f(*parts) == val - def test_formatd(): from rpython.rlib.rfloat import formatd def fn(x): @@ -453,7 +434,7 @@ f1 = compile(does_stuff, []) res = f1() assert res != os.getpid() - + if hasattr(os, 'getpgrp'): def test_os_getpgrp(): def does_stuff(): diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -19,7 +19,7 @@ from rpython.config.config import (to_optparse, OptionDescription, BoolOption, ArbitraryOption, StrOption, IntOption, Config, ChoiceOption, OptHelpFormatter) from rpython.config.translationoption import (get_combined_translation_config, - set_opt_level, final_check_config, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform) + set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform) GOALS = [ @@ -180,9 +180,6 @@ if 'handle_config' in targetspec_dic: targetspec_dic['handle_config'](config, translateconfig) - # perform checks 
(if any) on the final config - final_check_config(config) - return targetspec_dic, translateconfig, config, args def show_help(translateconfig, opt_parser, targetspec_dic, config): From noreply at buildbot.pypy.org Sat Jul 27 14:49:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 14:49:16 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: fix test flatten Message-ID: <20130727124916.E36FC1C0113@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65709:4f3394996060 Date: 2013-07-27 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/4f3394996060/ Log: fix test flatten diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -4,6 +4,7 @@ from rpython.jit.codewriter.flatten import GraphFlattener, ListOfKind, Register from rpython.jit.codewriter.format import assert_format from rpython.jit.codewriter import longlong +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import AbstractDescr from rpython.rtyper.lltypesystem import lltype, rclass, rstr, rffi from rpython.flowspace.model import SpaceOperation, Variable, Constant @@ -71,8 +72,8 @@ callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None, - extradescrs=None): + def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, + extraeffect=None, extradescrs=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): @@ -81,7 +82,7 @@ pass return FakeDescr(oopspecindex) def calldescr_canraise(self, calldescr): - return calldescr is not self._descr_cannot_raise and calldescr.oopspecindex is None + return calldescr is not self._descr_cannot_raise and calldescr.oopspecindex == EffectInfo.OS_NONE def get_vinfo(self, VTYPEPTR): return None From noreply at buildbot.pypy.org Sat Jul 27 14:54:00 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 14:54:00 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: argh. Fix an obvious bug in asmgcc handling Message-ID: <20130727125400.C257C1C11A9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65710:6f0c3308111a Date: 2013-07-27 14:53 +0200 http://bitbucket.org/pypy/pypy/changeset/6f0c3308111a/ Log: argh. 
Fix an obvious bug in asmgcc handling diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -157,12 +157,13 @@ self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) if IS_X86_64: mc.SUB(esp, imm(WORD)) + self.set_extra_stack_depth(mc, 2 * WORD) else: # we want space for 3 arguments + call + alignment # the caller is responsible for putting arguments in the right spot mc.SUB(esp, imm(WORD * 7)) - self.set_extra_stack_depth(mc, 2 * WORD) - # args are in their respective positions + self.set_extra_stack_depth(mc, 8 * WORD) + # args are in their respective positions mc.CALL(eax) if IS_X86_64: mc.ADD(esp, imm(WORD)) From noreply at buildbot.pypy.org Sat Jul 27 15:01:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 15:01:20 +0200 (CEST) Subject: [pypy-commit] pypy default: skip this test if we can't find objdump (should make it work on OS X) Message-ID: <20130727130120.9A8AF1C11A9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65711:3a4776c59dcc Date: 2013-07-27 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/3a4776c59dcc/ Log: skip this test if we can't find objdump (should make it work on OS X) diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -1,6 +1,8 @@ +import py import re from rpython.tool.logparser import extract_category +from rpython.jit.backend.tool.viewcode import ObjdumpNotFound from pypy.tool.jitlogparser.parser import (import_log, parse_log_counts, mangle_descr) @@ -41,7 +43,10 @@ lib_re = re.compile("file '.*lib-python.*'") for loop in loops: if hasattr(loop, 'force_asm'): - loop.force_asm() + try: + loop.force_asm() + except ObjdumpNotFound: + py.test.skip("ObjDump was not found, skipping") if lib_re.search(loop.comment) or \ lib_re.search(loop.operations[0].repr()): # do not care for _optimize_charset or _mk_bitmap @@ -60,7 +65,7 @@ by_count = lambda l: -l.count is_prime_loops.sort(key=by_count) fn_with_bridges_loops.sort(key=by_count) - + # check that we can find bridges corresponding to " % 3" and " % 5" mod_bridges = [] for op in fn_with_bridges_loops[0].operations: @@ -69,7 +74,7 @@ if bridge is not None: mod_bridges.append(bridge) assert len(mod_bridges) in (1, 2) - + # check that counts are reasonable (precise # may change in the future) assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -36,6 +36,9 @@ if sys.platform == "win32": pass # lots more in Psyco +class ObjdumpNotFound(Exception): + pass + def find_objdump(): exe = ('objdump', 'gobjdump') path = os.environ['PATH'].split(os.pathsep) @@ -45,7 +48,7 @@ if not os.path.exists(path_to): continue return e - raise AssertionError('(g)objdump was not found in PATH') + raise ObjdumpNotFound('(g)objdump was not found in PATH') def machine_code_dump(data, originaddr, backend_name, label_list=None): objdump_backend_option = { @@ -326,7 +329,7 @@ color = "black" else: color = "red" - g1.emit_edge('N_%x' % r.addr, 'N_%x' % targetaddr, + g1.emit_edge('N_%x' % r.addr, 'N_%x' % targetaddr, color=color) sys.stdout.flush() if showgraph: From noreply at 
buildbot.pypy.org Sat Jul 27 16:15:59 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 16:15:59 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add more auto-conversion fun. Message-ID: <20130727141559.481891C00B1@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65712:e4c68588a0de Date: 2013-07-27 14:41 +0200 http://bitbucket.org/pypy/pypy/changeset/e4c68588a0de/ Log: Add more auto-conversion fun. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -13,7 +13,7 @@ decode_object, unicode_from_encoded_object, _get_encoding_and_errors) from rpython.rlib.jit import we_are_jitted from rpython.rlib.objectmodel import compute_hash, compute_unique_id -from rpython.rlib.rstring import StringBuilder +from rpython.rlib.rstring import StringBuilder, replace class W_AbstractBytesObject(W_Object): @@ -199,6 +199,27 @@ return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) return StringMethods._endswith(self, space, value, w_suffix, start, end) + @unwrap_spec(count=int) + def descr_replace(self, space, w_old, w_new, count=-1): + old_is_unicode = space.isinstance_w(w_old, space.w_unicode) + new_is_unicode = space.isinstance_w(w_new, space.w_unicode) + if old_is_unicode or new_is_unicode: + self_as_uni = unicode_from_encoded_object(space, self, None, None) + if not old_is_unicode: + w_old = unicode_from_encoded_object(space, w_old, None, None) + if not new_is_unicode: + w_new = unicode_from_encoded_object(space, w_new, None, None) + input = self_as_uni._val(space) + sub = self_as_uni._op_val(space, w_old) + by = self_as_uni._op_val(space, w_new) + try: + res = replace(input, sub, by, count) + except OverflowError: + raise OperationError(space.w_OverflowError, + space.wrap("replace string is too long")) + return self_as_uni._new(res) + return StringMethods.descr_replace(self, space, w_old, w_new, count) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -900,3 +900,8 @@ assert not u''.isnumeric() assert not u'a'.isnumeric() assert u'\u2460'.isnumeric() # CIRCLED DIGIT ONE + + def test_replace_autoconvert(self): + res = 'one!two!three!'.replace(u'!', u'@', 1) + assert res == u'one@two!three!' + assert type(res) == unicode From noreply at buildbot.pypy.org Sat Jul 27 16:16:00 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 16:16:00 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add missing import. Message-ID: <20130727141600.8B0891C0113@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65713:dbcdd32fe31f Date: 2013-07-27 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/dbcdd32fe31f/ Log: Add missing import.
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,7 +1,7 @@ """The builtin str implementation""" from pypy.interpreter.buffer import StringBuffer -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef From noreply at buildbot.pypy.org Sat Jul 27 16:26:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:26:50 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Fix the strange error of the debugging mprotect() calls by simply Message-ID: <20130727142650.4AEBB1C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r463:34dc06192796 Date: 2013-07-27 16:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/34dc06192796/ Log: Fix the strange error of the debugging mprotect() calls by simply running the example for less long. http://stackoverflow.com/questions/17896297/mprotect-does-not- always-collapse-adjacent-lines-in-proc-pid-maps diff --git a/c4/demo2.c b/c4/demo2.c --- a/c4/demo2.c +++ b/c4/demo2.c @@ -8,7 +8,7 @@ #include "fprintcolor.h" -#define LIST_LENGTH 500 +#define LIST_LENGTH 200 #define NUMTHREADS 4 @@ -89,7 +89,7 @@ if (r_next->value < r_current->value) { // swap current and next swap_nodes(r_prev, r_current, r_next); - fprintf(stdout, "#"); + //fprintf(stdout, "#"); // needs read barriers, because of write barriers in swap_nodes r_prev = (struct node*)stm_read_barrier((gcptr)r_prev); From noreply at buildbot.pypy.org Sat Jul 27 16:33:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:33:54 +0200 (CEST) Subject: [pypy-commit] stmgc default: hg merge copy-over-original2 Message-ID: <20130727143354.11F381C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r464:0a5e1481435c Date: 2013-07-27 16:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/0a5e1481435c/ Log: hg merge copy-over-original2 * during major gc copy the object back over the original * use more generally 'h_original' as a pointer to the most official version of the object * includes the 'weakref' branch diff too long, truncating to 2000 out of 2821 lines diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -16,10 +16,10 @@ H_FILES = atomic_ops.h stmgc.h stmimpl.h \ et.h lists.h steal.h nursery.h gcpage.h \ - stmsync.h extra.h dbgmem.h fprintcolor.h + stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h C_FILES = et.c lists.c steal.c nursery.c gcpage.c \ - stmsync.c extra.c dbgmem.c fprintcolor.c + stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 -DDUMP_EXTRA=1 -D_GC_DEBUGPRINTS=1 diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -8,7 +8,7 @@ #ifdef _GC_DEBUG /************************************************************/ -#define MMAP_TOTAL 671088640 /* 640MB */ +#define MMAP_TOTAL 1280*1024*1024 /* 1280MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; static char *zone_start, *zone_current = NULL, *zone_end = NULL; @@ -70,6 +70,10 @@ void stm_free(void *p, size_t sz) { + if (p == NULL) { + assert(sz == 0); + return; + } assert(((intptr_t)((char *)p + sz) & (PAGE_SIZE-1)) == 0); size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; 
@@ -83,6 +87,14 @@ _stm_dbgmem(p, sz, PROT_NONE); } +void *stm_realloc(void *p, size_t newsz, size_t oldsz) +{ + void *r = stm_malloc(newsz); + memcpy(r, p, oldsz < newsz ? oldsz : newsz); + stm_free(p, oldsz); + return r; +} + int _stm_can_access_memory(char *p) { long base = ((char *)p - zone_start) / PAGE_SIZE; diff --git a/c4/dbgmem.h b/c4/dbgmem.h --- a/c4/dbgmem.h +++ b/c4/dbgmem.h @@ -6,6 +6,7 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); +void *stm_realloc(void *, size_t, size_t); int _stm_can_access_memory(char *); void assert_cleared(char *, size_t); @@ -13,6 +14,7 @@ #define stm_malloc(sz) malloc(sz) #define stm_free(p,sz) free(p) +#define stm_realloc(p,newsz,oldsz) realloc(p,newsz) #define assert_cleared(p,sz) do { } while(0) #endif diff --git a/c4/demo2.c b/c4/demo2.c --- a/c4/demo2.c +++ b/c4/demo2.c @@ -8,7 +8,7 @@ #include "fprintcolor.h" -#define LIST_LENGTH 500 +#define LIST_LENGTH 200 #define NUMTHREADS 4 @@ -89,7 +89,7 @@ if (r_next->value < r_current->value) { // swap current and next swap_nodes(r_prev, r_current, r_next); - fprintf(stdout, "#"); + //fprintf(stdout, "#"); // needs read barriers, because of write barriers in swap_nodes r_prev = (struct node*)stm_read_barrier((gcptr)r_prev); diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -25,27 +25,46 @@ // SUPPORT #define GCTID_STRUCT_NODE 123 +#define GCTID_WEAKREF 122 + +struct node; +typedef struct node * nodeptr; +struct weak_node { + struct stm_object_s hdr; + nodeptr node; +}; +typedef struct weak_node * weaknodeptr; +#define WEAKNODE_SIZE sizeof(struct weak_node) struct node { struct stm_object_s hdr; long value; revision_t id; revision_t hash; - struct node *next; + nodeptr next; + weaknodeptr weakref; }; -typedef struct node * nodeptr; + + size_t stmcb_size(gcptr ob) { - assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); - return sizeof(struct node); + if (stm_get_tid(ob) == GCTID_STRUCT_NODE) + return sizeof(struct node); + else if (stm_get_tid(ob) == GCTID_WEAKREF) + return WEAKNODE_SIZE; + assert(0); } + void stmcb_trace(gcptr ob, void visit(gcptr *)) { nodeptr n; + if (stm_get_tid(ob) == GCTID_WEAKREF) + return; assert(stm_get_tid(ob) == GCTID_STRUCT_NODE); n = (nodeptr)ob; visit((gcptr *)&n->next); + visit((gcptr *)&n->weakref); } @@ -99,6 +118,21 @@ return (int)(rand_r(&td.thread_seed) % (unsigned int)max); } +gcptr get_random_root() +{ + int num = get_rand(td.num_roots + 1); + if (num == 0) + return stm_thread_local_obj; + else + return td.roots[num - 1]; +} + +gcptr get_random_shared_root() +{ + int num = get_rand(SHARED_ROOTS); + return shared_roots[num]; +} + void copy_roots(gcptr *from, gcptr *to, int num) { int i; @@ -167,6 +201,27 @@ return r; } + +weaknodeptr allocate_weaknodeptr(nodeptr to) +{ + weaknodeptr w; + push_roots(); + w = (weaknodeptr)stm_weakref_allocate(WEAKNODE_SIZE, GCTID_WEAKREF, + (gcptr)to); + pop_roots(); + return w; +} + +void set_weakref(nodeptr n, nodeptr to) +{ + stm_push_root((gcptr)n); + weaknodeptr w = allocate_weaknodeptr(to); + n = (nodeptr)stm_pop_root(); + n = (nodeptr)stm_write_barrier((gcptr)n); + n->weakref = w; + dprintf(("set_weakref %p -> %p -> %p\n", n, w, to)); +} + int is_shared_prebuilt(gcptr p) { int i; @@ -276,7 +331,7 @@ } else { if (in_nursery(p)) { - assert(p->h_tid & GCFLAG_NURSERY_MOVED); + assert(p->h_tid & GCFLAG_MOVED); assert(!(p->h_revision & 1)); } return C_PUBLIC; @@ -418,6 +473,46 @@ return p; } +gcptr weakref_events(gcptr p, gcptr _r, gcptr _sr) +{ + nodeptr t; + weaknodeptr w, ww; + 
gcptr ptrs[] = {_r, _sr}; + + int i = get_rand(2); + int k = get_rand(3); + switch (k) { + case 0: // check weakref + t = (nodeptr)read_barrier(ptrs[i]); + w = t->weakref; + if(w) { + ww = (weaknodeptr)stm_read_barrier((gcptr)w); + assert(stm_get_tid((gcptr)ww) == GCTID_WEAKREF); + if (ww->node) { + check((gcptr)ww->node); + } + else { + t->weakref = NULL; + } + } + p = NULL; + break; + case 1: // set weakref to something + if (p) + set_weakref((nodeptr)_r, (nodeptr)p); + else + set_weakref((nodeptr)_r, (nodeptr)get_random_root()); + p = NULL; + break; + case 2: // set weakref on shared roots + set_weakref((nodeptr)_sr, (nodeptr)get_random_shared_root()); + p = NULL; + break; + } + return p; +} + + gcptr shared_roots_events(gcptr p, gcptr _r, gcptr _sr) { nodeptr w_sr; @@ -432,7 +527,7 @@ break; case 2: w_sr = (nodeptr)write_barrier(_sr); - w_sr->next = (nodeptr)shared_roots[get_rand(SHARED_ROOTS)]; + w_sr->next = (nodeptr)get_random_shared_root(); break; } return p; @@ -491,18 +586,12 @@ gcptr do_step(gcptr p) { gcptr _r, _sr; - int num, k; + int k; - num = get_rand(td.num_roots+1); - if (num == 0) - _r = stm_thread_local_obj; - else - _r = td.roots[num - 1]; - - num = get_rand(SHARED_ROOTS); - _sr = shared_roots[num]; + _r = get_random_root(); + _sr = get_random_shared_root(); - k = get_rand(9); + k = get_rand(11); check(p); assert(thread_descriptor->active); @@ -514,6 +603,8 @@ p = id_hash_events(p, _r, _sr); else if (k < 8) p = rare_events(p, _r, _sr); + else if (k < 10) + p = weakref_events(p, _r, _sr); else if (get_rand(20) == 1) { // transaction break fprintf(stdout, "|"); @@ -636,11 +727,12 @@ { int i, status; - // seed changes daily - // a bit pointless for now.. + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. */ default_seed = time(NULL); - default_seed -= (default_seed % (3600 * 24)); - + printf("running with seed=%lld\n", (long long)default_seed); + for (i = 0; i < SHARED_ROOTS; i++) { if (i % 3 == 0) { shared_roots[i] = allocate_pseudoprebuilt_with_hash( diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -6,6 +6,29 @@ */ #include "stmimpl.h" +#ifdef _GC_DEBUG +char tmp_buf[128]; +char* stm_dbg_get_hdr_str(gcptr obj) +{ + char *cur; + char *flags[] = GC_FLAG_NAMES; + int i; + + i = 0; + cur = tmp_buf; + cur += sprintf(cur, "%p:", obj); + while (flags[i]) { + if (obj->h_tid & (STM_FIRST_GCFLAG << i)) { + cur += sprintf(cur, "%s|", flags[i]); + } + i++; + } + cur += sprintf(cur, "tid=%ld", stm_get_tid(obj)); + return tmp_buf; +} +#endif + + __thread struct tx_descriptor *thread_descriptor = NULL; @@ -122,7 +145,7 @@ gcptr P_prev = P; P = (gcptr)v; assert((P->h_tid & GCFLAG_PUBLIC) || - (P_prev->h_tid & GCFLAG_NURSERY_MOVED)); + (P_prev->h_tid & GCFLAG_MOVED)); v = ACCESS_ONCE(P->h_revision); @@ -214,7 +237,7 @@ add_in_recent_reads_cache: /* The risks are that the following assert fails, because the flag was added just now by a parallel thread during stealing... 
*/ - /*assert(!(P->h_tid & GCFLAG_NURSERY_MOVED));*/ + /*assert(!(P->h_tid & GCFLAG_MOVED));*/ fxcache_add(&d->recent_reads_cache, P); return P; @@ -257,7 +280,7 @@ */ if (P->h_tid & GCFLAG_PUBLIC) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) { P = (gcptr)P->h_revision; assert(P->h_tid & GCFLAG_PUBLIC); @@ -389,7 +412,7 @@ while (v = P->h_revision, IS_POINTER(v)) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) dprintf(("nursery_moved ")); if (v & 2) @@ -486,7 +509,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { assert(R->h_tid & GCFLAG_PUBLIC); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); #ifdef _GC_DEBUG wlog_t *entry; @@ -545,6 +568,14 @@ gcptr stm_WriteBarrier(gcptr P) { + assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert((P->h_tid & GCFLAG_STUB) || + stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); + /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a + risk of overrunning the object later in gcpage.c when copying a stub + over it. However such objects are so small that they contain no field + at all, and so no write barrier should occur on them. */ + if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into @@ -581,7 +612,7 @@ Add R into the list 'public_with_young_copy', unless W is actually an old object, in which case we need to record W. */ - if (R->h_tid & GCFLAG_NURSERY_MOVED) + if (R->h_tid & GCFLAG_MOVED) { /* Bah, the object turned into this kind of stub, possibly while we were waiting for the collection_lock, because it @@ -671,8 +702,8 @@ continue; } } - else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) - == (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) + else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_MOVED)) + == (GCFLAG_PUBLIC | GCFLAG_MOVED)) { /* such an object is identical to the one it points to (stolen protected young object with h_revision pointing @@ -945,6 +976,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + dprintf(("acquire_locks\n")); assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); @@ -957,6 +989,7 @@ revision_t v; retry: assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); v = ACCESS_ONCE(R->h_revision); if (IS_POINTER(v)) /* "has a more recent revision" */ { @@ -989,7 +1022,7 @@ static void CancelLocks(struct tx_descriptor *d) { wlog_t *item; - + dprintf(("cancel_locks\n")); if (!g2l_any_entry(&d->public_to_private)) return; @@ -1082,7 +1115,7 @@ assert(!(L->h_tid & GCFLAG_VISITED)); assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(L->h_tid & GCFLAG_MOVED)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -1094,7 +1127,9 @@ gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB + | GCFLAG_SMALLSTUB | GCFLAG_OLD; + dprintf(("et.c: stm_stub_malloc -> %p\n", stub)); stub->h_revision = ((revision_t)L) | 2; assert(!(L->h_tid & GCFLAG_HAS_ID)); @@ -1129,7 +1164,7 @@ assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -1224,7 +1259,7 @@ assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); P->h_tid |= GCFLAG_PUBLIC; assert(!(P->h_tid & GCFLAG_HAS_ID)); - if (!(P->h_tid 
& GCFLAG_OLD)) P->h_tid |= GCFLAG_NURSERY_MOVED; + if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_MOVED; /* P becomes a public outdated object. It may create an exception documented in doc-objects.txt: a public but young object. It's still fine because it should only be seen by @@ -1257,7 +1292,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - + dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1341,6 +1376,7 @@ d->active = 2; d->reads_size_limit_nonatomic = 0; update_reads_size_limit(d); + dprintf(("make_inevitable(%p)\n", d)); } static revision_t acquire_inev_mutex_and_mark_global_cur_time( diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -25,7 +25,11 @@ * * GCFLAG_OLD is set on old objects. * - * GCFLAG_VISITED is used temporarily during major collections. + * GCFLAG_VISITED and GCFLAG_MARKED are used temporarily during major + * collections. The objects are MARKED|VISITED as soon as they have been + * added to 'objects_to_trace', and so will be or have been traced. The + * objects are only MARKED if their memory must be kept alive, but (so far) + * we found that tracing them is not useful. * * GCFLAG_PUBLIC is set on public objects. * @@ -46,7 +50,7 @@ * the list 'old_objects_to_trace'; it is set again at the next minor * collection. * - * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. + * GCFLAG_MOVED is used temporarily during minor/major collections. * * GCFLAG_STUB is set for debugging on stub objects made by stealing or * by major collections. 'p_stub->h_revision' might be a value @@ -67,14 +71,20 @@ static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_NURSERY_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; +static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; +static const revision_t GCFLAG_SMALLSTUB /*debug*/ = STM_FIRST_GCFLAG << 12; +static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13; +/* warning, the last flag available is "<< 15" on 32-bit */ + /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ + GCFLAG_MARKED | \ GCFLAG_PREBUILT_ORIGINAL | \ GCFLAG_OLD | \ GCFLAG_PUBLIC) @@ -85,10 +95,14 @@ "PREBUILT_ORIGINAL", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ - "NURSERY_MOVED", \ + "MOVED", \ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ + "HAS_ID", \ + "IMMUTABLE", \ + "SMALLSTUB", \ + "MARKED", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ @@ -196,4 +210,7 @@ void DescriptorInit(void); void DescriptorDone(void); +#ifdef _GC_DEBUG +char* stm_dbg_get_hdr_str(gcptr obj); +#endif #endif /* _ET_H */ diff --git a/c4/extra.c b/c4/extra.c --- a/c4/extra.c +++ b/c4/extra.c @@ -3,7 +3,7 @@ void stm_copy_to_old_id_copy(gcptr obj, gcptr id) { - //assert(!is_in_nursery(thread_descriptor, id)); + 
//assert(!stmgc_is_in_nursery(thread_descriptor, id)); assert(id->h_tid & GCFLAG_OLD); size_t size = stmgc_size(obj); @@ -131,17 +131,16 @@ _Bool stm_pointer_equal(gcptr p1, gcptr p2) { - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* if p1 or p2 is NULL (but not both, because they are different - pointers), then return 0 */ - if (p1 == NULL || p2 == NULL) - return 0; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); + if (p1 != NULL && p2 != NULL) { + /* resolve h_original, but only if !PREBUILT_ORIGINAL */ + if (p1->h_original && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p1 = (gcptr)p1->h_original; + } + if (p2->h_original && !(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p2 = (gcptr)p2->h_original; + } + } + return (p1 == p2); } /************************************************************/ diff --git a/c4/gcpage.c b/c4/gcpage.c --- a/c4/gcpage.c +++ b/c4/gcpage.c @@ -212,147 +212,229 @@ static struct GcPtrList objects_to_trace; -static void keep_original_alive(gcptr obj) +static gcptr copy_over_original(gcptr obj, gcptr id_copy) { - /* keep alive the original of a visited object */ - gcptr id_copy = (gcptr)obj->h_original; - /* prebuilt original objects may have a predifined - hash in h_original */ - if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - /* see fix_outdated() */ - id_copy->h_tid |= GCFLAG_VISITED; + assert(obj != id_copy); + assert(id_copy == (gcptr)obj->h_original); + assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ - /* XXX: may not always need tracing? */ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); - } - else { - /* prebuilt originals won't get collected anyway - and if they are not reachable in any other way, - we only ever need their location, not their content */ + /* check a few flags */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + + assert(id_copy->h_tid & GCFLAG_PUBLIC); + assert(!(id_copy->h_tid & GCFLAG_BACKUP_COPY)); + + /* id_copy may be a stub, but in this case, as the original, it + should have been allocated with a big enough chunk of memory. + Also, obj itself might be a stub. */ + assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); + if (!(id_copy->h_tid & GCFLAG_STUB) && !(obj->h_tid & GCFLAG_STUB)) { + assert(stmgc_size(id_copy) == stmgc_size(obj)); + } + + /* add the MOVED flag to 'obj' */ + obj->h_tid |= GCFLAG_MOVED; + + /* copy the object's content */ + size_t objsize; + if (obj->h_tid & GCFLAG_STUB) + objsize = sizeof(struct stm_stub_s); + else { + objsize = stmgc_size(obj); + assert(objsize > sizeof(struct stm_stub_s) - WORD); + } + dprintf(("copy %p over %p (%zd bytes)\n", obj, id_copy, objsize)); + memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); + + /* copy the object's h_revision number */ + id_copy->h_revision = obj->h_revision; + + /* copy the STUB flag */ + id_copy->h_tid &= ~GCFLAG_STUB; + id_copy->h_tid |= (obj->h_tid & GCFLAG_STUB); + + return id_copy; +} + +static void visit_nonpublic(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* Visit a protected or private object. 'gcp' must be either NULL or + point to the thread that has got the object. 
This 'gcp' is only an + optimization: it lets us trace (most) private/protected objects + and replace pointers to public objects in them with pointers to + private/protected objects if they are the most recent ones, + provided they belong to the same thread. + */ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_HAS_ID)); + assert(!(obj->h_tid & GCFLAG_SMALLSTUB)); + assert(!(obj->h_tid & GCFLAG_MOVED)); + + if (obj->h_tid & GCFLAG_VISITED) + return; /* already visited */ + + obj->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + gcptrlist_insert2(&objects_to_trace, obj, (gcptr)gcp); + + obj = (gcptr)obj->h_original; + if (obj != NULL) + obj->h_tid |= GCFLAG_MARKED; +} + +static gcptr visit_public(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* The goal is to walk to the most recent copy, then copy its + content back into the h_original, and finally returns this + h_original. Or, if gcp != NULL and the most recent copy is + protected by precisely 'gcp', then we return it instead. + */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + + gcptr original; + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + original = (gcptr)obj->h_original; + /* the h_original may be protected, or private_from_protected, + in some cases. Then we can't use it. We'll use the most + recent h_revision which is public. */ + if (!(original->h_tid & GCFLAG_PUBLIC)) { + original->h_tid |= GCFLAG_MARKED; + original = NULL; } } + else + original = obj; + + /* the original object must not be a small stub. */ + assert(original == NULL || !(original->h_tid & GCFLAG_SMALLSTUB)); + + /* if 'original' was already visited, we are done */ + if (original != NULL && original->h_tid & GCFLAG_VISITED) + return original; + + /* walk to the head of the chained list */ + while (IS_POINTER(obj->h_revision)) { + if (!(obj->h_revision & 2)) { + obj = (gcptr)obj->h_revision; + assert(obj->h_tid & GCFLAG_PUBLIC); + continue; + } + + /* it's a stub: check the current stealing status */ + assert(obj->h_tid & GCFLAG_STUB); + gcptr obj2 = (gcptr)(obj->h_revision - 2); + + if (obj2->h_tid & GCFLAG_PUBLIC) { + /* the stub target itself was stolen, so is public now. + Continue looping from there. */ + obj = obj2; + continue; + } + + if (obj2->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* the stub target is a private_from_protected. */ + gcptr obj3 = (gcptr)obj2->h_revision; + if (obj3->h_tid & GCFLAG_PUBLIC) { + assert(!(obj3->h_tid & GCFLAG_BACKUP_COPY)); + /* the backup copy was stolen and is now a regular + public object. */ + obj = obj3; + continue; + } + else { + /* the backup copy was not stolen. Ignore this pair + obj2/obj3, and the head of the public chain is obj. + The pair obj2/obj3 was or will be handled by + mark_all_stack_roots(). */ + assert(obj3->h_tid & GCFLAG_BACKUP_COPY); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + else { + /* the stub target is just a protected object. + The head of the public chain is obj. We have to + explicitly keep obj2 alive. */ + assert(!IS_POINTER(obj2->h_revision)); + visit_nonpublic(obj2, STUB_THREAD(obj)); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + + /* at this point, 'obj' contains the most recent revision which is + public. 
*/ + if (original == NULL) { + original = obj; + if (original->h_tid & GCFLAG_VISITED) + return original; + } + else if (obj != original) { + /* copy obj over original */ + copy_over_original(obj, original); + } + + /* return this original */ + original->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + if (!(original->h_tid & GCFLAG_STUB)) + gcptrlist_insert2(&objects_to_trace, original, NULL); + return original; } -static void visit(gcptr *pobj) +static struct tx_public_descriptor *visit_protected_gcp; + +static void visit_take_protected(gcptr *pobj) { + /* Visits '*pobj', marking it as surviving and possibly adding it to + objects_to_trace. Fixes *pobj to point to the exact copy that + survived. This function will replace *pobj with a protected + copy if it belongs to the thread 'visit_protected_gcp', so the + latter must be initialized before any call! + */ gcptr obj = *pobj; if (obj == NULL) return; - restart: - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - keep_original_alive(obj); - } - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - gcptr prev_obj = obj; - - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub: keep it if it points to a protected version, - because we need to keep the effect of stealing if it is - later accessed by the wrong thread. If it points to a - public object (possibly outdated), we can ignore the stub. - */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC)) { - prev_obj->h_tid |= GCFLAG_VISITED; - keep_original_alive(prev_obj); - - assert(*pobj == prev_obj); - gcptr obj1 = obj; - visit(&obj1); /* recursion, but should be only once */ - assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj1) | 2; - return; - } - } - - if (!(obj->h_revision & 3)) { - /* obj is neither a stub nor a most recent revision: - completely ignore obj->h_revision */ - - obj = (gcptr)obj->h_revision; - assert(obj->h_tid & GCFLAG_PUBLIC); - prev_obj->h_revision = (revision_t)obj; - } - *pobj = obj; - goto restart; - } - else if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + /* 'obj' is a private or protected copy. 
*/ + visit_nonpublic(obj, visit_protected_gcp); } else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - - if (obj->h_original && (gcptr)obj->h_original != B) { - /* if B is original, it will be visited anyway */ - assert(obj->h_original == B->h_original); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - keep_original_alive(obj); - } - - obj->h_tid |= GCFLAG_VISITED; - B->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert2(&objects_to_trace, obj, B); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - pobj = (gcptr *)&B->h_revision; - obj = *pobj; - goto restart; - } + *pobj = visit_public(obj, visit_protected_gcp); } } - -static void visit_keep(gcptr obj) +gcptr stmgcpage_visit(gcptr obj) { - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - if (IS_POINTER(obj->h_revision)) { - assert(!(obj->h_revision & 2)); - visit((gcptr *)&obj->h_revision); - } - keep_original_alive(obj); + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + visit_nonpublic(obj, NULL); } + else { + obj = visit_public(obj, NULL); + } + return obj; } static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { + visit_protected_gcp = + (struct tx_public_descriptor *)gcptrlist_pop(&objects_to_trace); gcptr obj = gcptrlist_pop(&objects_to_trace); - stmgc_trace(obj, &visit); + stmgc_trace(obj, &visit_take_protected); } + visit_protected_gcp = NULL; } static void mark_prebuilt_roots(void) @@ -360,18 +442,20 @@ /* Note about prebuilt roots: 'stm_prebuilt_gcroots' is a list that contains all the ones that have been modified. Because they are themselves not in any page managed by this file, their - GCFLAG_VISITED will not be removed at the end of the current - collection. This is fine because the base object cannot contain - references to the heap. So we decided to systematically set - GCFLAG_VISITED on prebuilt objects. */ + GCFLAG_VISITED is not removed at the end of the current + collection. That's why we remove it here. GCFLAG_MARKED is not + relevant for prebuilt objects, but we avoid objects with MARKED + but not VISITED, which trigger some asserts. 
*/ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; - gcptr obj; + gcptr obj, obj2; for (; pobj != pend; pobj++) { obj = *pobj; + obj->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - assert(IS_POINTER(obj->h_revision)); - visit((gcptr *)&obj->h_revision); + + obj2 = visit_public(obj, NULL); + assert(obj2 == obj); /* it is its own original */ } } @@ -385,7 +469,7 @@ if (((revision_t)item) & ~((revision_t)END_MARKER_OFF | (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ - visit(root); + visit_take_protected(root); dprintf(("visit stack root: %p -> %p\n", item, *root)); } else if (item == END_MARKER_OFF) { @@ -398,15 +482,19 @@ static void mark_all_stack_roots(void) { struct tx_descriptor *d; + struct GcPtrList new_public_to_private; + memset(&new_public_to_private, 0, sizeof(new_public_to_private)); + for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); + visit_protected_gcp = d->public_descriptor; /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); /* the thread-local object */ - visit(d->thread_local_obj_ref); - visit(&d->old_thread_local_obj); + visit_take_protected(d->thread_local_obj_ref); + visit_take_protected(&d->old_thread_local_obj); /* the current transaction's private copies of public objects */ wlog_t *item; @@ -416,79 +504,88 @@ gcptr R = item->addr; gcptr L = item->val; - /* Objects that were not visited yet must have the PUB_TO_PRIV - flag. Except if that transaction will abort anyway, then it - may be removed from a previous major collection that didn't - fix the PUB_TO_PRIV because the transaction was going to - abort anyway: - 1. minor_collect before major collect (R->L, R is outdated, abort) - 2. major collect removes flag - 3. major collect again, same thread, no time to abort - 4. flag still removed - */ - assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, - R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - visit_keep(R); + /* we visit the public object R. Must keep a public object + here, so we pass NULL as second argument. */ + gcptr new_R = visit_public(R, NULL); + assert(new_R->h_tid & GCFLAG_PUBLIC); + + if (new_R != R) { + /* we have to update the key in public_to_private, which + can only be done by deleting the existing key and + (after the loop) re-inserting the new key. */ + G2L_LOOP_DELETE(item); + gcptrlist_insert2(&new_public_to_private, new_R, L); + } + + /* we visit the private copy L --- which at this point + should be private, possibly private_from_protected, + so visit() should return the same private copy */ if (L != NULL) { - /* minor collection found R->L in public_to_young - and R was modified. It then sets item->val to NULL and wants - to abort later. */ - revision_t v = L->h_revision; - visit_keep(L); - /* a bit of custom logic here: if L->h_revision used to - point exactly to R, as set by stealing, then we must - keep this property, even though visit_keep(L) might - decide it would be better to make it point to a more - recent copy. 
*/ - if (v == (revision_t)R) { - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - L->h_revision = v; /* restore */ - } + visit_nonpublic(L, visit_protected_gcp); } + } G2L_LOOP_END; + /* reinsert to real pub_to_priv */ + long i, size = new_public_to_private.size; + gcptr *items = new_public_to_private.items; + for (i = 0; i < size; i += 2) { + g2l_insert(&d->public_to_private, items[i], items[i + 1]); + } + gcptrlist_clear(&new_public_to_private); + + /* the current transaction's private copies of protected objects */ + items = d->private_from_protected.items; + for (i = d->private_from_protected.size - 1; i >= 0; i--) { + gcptr obj = items[i]; + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + visit_nonpublic(obj, visit_protected_gcp); + + gcptr backup_obj = (gcptr)obj->h_revision; + if (!(backup_obj->h_tid & GCFLAG_PUBLIC)) + visit_nonpublic(backup_obj, visit_protected_gcp); + else + obj->h_revision = (revision_t)visit_public(backup_obj, NULL); + } + /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0); + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); /* NOT NECESSARILY EMPTY: - list_of_read_objects - private_from_protected - public_to_private - - old_objects_to_trace */ assert(gcptrlist_size(&d->list_of_read_objects) == d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) == d->num_private_from_protected_known_old); } + + visit_protected_gcp = NULL; + gcptrlist_delete(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) { long i; gcptr *items; - - /* It can occur that 'private_from_protected' contains an object that - * has not been visited at all (maybe only in inevitable - * transactions). - */ - items = d->private_from_protected.items; - for (i = d->private_from_protected.size - 1; i >= 0; i--) { - gcptr obj = items[i]; - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - - if (!(obj->h_tid & GCFLAG_VISITED)) { - /* forget 'obj' */ - items[i] = items[--d->private_from_protected.size]; - } - } + assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do * more here. */ - if (d->active < 0) - return; /* already "aborted" during forced minor collection */ + if (d->active < 0) { + /* already "aborted" during forced minor collection + clear list of read objects so that a possible minor collection + before the abort doesn't trip + fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; + return; + } if (d->active == 2) { /* inevitable transaction: clear the list of read objects */ @@ -498,25 +595,36 @@ items = d->list_of_read_objects.items; for (i = d->list_of_read_objects.size - 1; i >= 0; --i) { gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_STUB)); - /* Warning: in case the object listed is outdated and has been - replaced with a more recent revision, then it might be the - case that obj->h_revision doesn't have GCFLAG_VISITED, but - just removing it is very wrong --- we want 'd' to abort. 
- */ - if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + if (obj->h_tid & GCFLAG_MOVED) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(IS_POINTER(obj->h_original)); + obj = (gcptr)obj->h_original; + items[i] = obj; + } + else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* Warning: in case the object listed is outdated and has been + replaced with a more recent revision, then it might be the + case that obj->h_revision doesn't have GCFLAG_VISITED, but + just removing it is very wrong --- we want 'd' to abort. + */ /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; + + /* the backup-ptr should already be updated: */ + assert(!(obj->h_tid & GCFLAG_MOVED)); } revision_t v = obj->h_revision; - if (IS_POINTER(v)) { + if ((obj->h_tid & GCFLAG_STUB) || IS_POINTER(v)) { /* has a more recent revision. Oups. */ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR); + /* fix_list_of_read_objects should not run */ + gcptrlist_clear(&d->list_of_read_objects); + d->num_read_objects_known_old = 0; return; } @@ -551,7 +659,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - + assert(!(item->addr->h_tid & GCFLAG_MOVED)); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ @@ -590,7 +698,9 @@ and the flag is removed; other locations are marked as free. */ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) break; /* first object that stays alive */ p = (gcptr)(((char *)p) + obj_size); } @@ -600,8 +710,10 @@ surviving_pages = lpage; p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); mc_total_in_use += obj_size; } else { @@ -627,6 +739,7 @@ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { assert(!(p->h_tid & GCFLAG_VISITED)); + assert(!(p->h_tid & GCFLAG_MARKED)); if (p->h_tid != DEBUG_WORD(0xDD)) { dprintf(("| freeing %p (with page %p)\n", p, lpage)); } @@ -656,8 +769,10 @@ G2L_LOOP_FORWARD(gcp->nonsmall_objects, item) { gcptr p = item->addr; - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); } else { G2L_LOOP_DELETE(item); @@ -775,9 +890,13 @@ assert(gcptrlist_size(&objects_to_trace) == 0); mark_prebuilt_roots(); mark_all_stack_roots(); - visit_all_objects(); + do { + visit_all_objects(); + stm_visit_old_weakrefs(); + } while (gcptrlist_size(&objects_to_trace) != 0); gcptrlist_delete(&objects_to_trace); clean_up_lists_of_read_objects_and_fix_outdated_flags(); + stm_clean_old_weakrefs(); mc_total_in_use = mc_total_reserved = 0; free_all_unused_local_pages(); diff --git a/c4/gcpage.h b/c4/gcpage.h --- a/c4/gcpage.h +++ b/c4/gcpage.h @@ -45,7 +45,8 @@ /* These fields are in tx_public_descriptor rather than tx_descriptor. 
The indirection allows us to keep around the lists of pages even - after the thread finishes, until the next major collection. + after the thread finishes. Such a "zombie" tx_public_descriptor + is reused by the next thread that starts. */ #define GCPAGE_FIELDS_DECL \ /* The array 'pages_for_size' contains GC_SMALL_REQUESTS \ @@ -65,7 +66,10 @@ /* A set of all non-small objects (outside the nursery). \ We could also have a single global set, but this avoids \ locking in stmgcpage_malloc/free. */ \ - struct G2L nonsmall_objects; + struct G2L nonsmall_objects; \ + \ + /* Weakref support */ \ + struct GcPtrList old_weakrefs; #define LOCAL_GCPAGES() (thread_descriptor->public_descriptor) @@ -80,6 +84,7 @@ void stmgcpage_add_prebuilt_root(gcptr obj); void stmgcpage_possibly_major_collect(int force); long stmgcpage_count(int quantity); +gcptr stmgcpage_visit(gcptr); extern struct GcPtrList stm_prebuilt_gcroots; diff --git a/c4/lists.c b/c4/lists.c --- a/c4/lists.c +++ b/c4/lists.c @@ -18,7 +18,7 @@ void g2l_delete(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); memset(g2l, 0, sizeof(struct G2L)); } @@ -56,7 +56,7 @@ long alloc = g2l->raw_end - g2l->raw_start; long newalloc = (alloc + extra + (alloc >> 2) + 31) & ~15; //fprintf(stderr, "growth: %ld\n", newalloc); - char *newitems = malloc(newalloc); + char *newitems = stm_malloc(newalloc); newg2l.raw_start = newitems; newg2l.raw_current = newitems; newg2l.raw_end = newitems + newalloc; @@ -65,7 +65,7 @@ { g2l_insert(&newg2l, item->addr, item->val); } G2L_LOOP_END; - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); *g2l = newg2l; } @@ -151,7 +151,7 @@ //fprintf(stderr, "list %p deleted (%ld KB)\n", //gcptrlist, gcptrlist->alloc * sizeof(gcptr) / 1024); gcptrlist->size = 0; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = NULL; gcptrlist->alloc = 0; } @@ -162,7 +162,8 @@ return; size_t nsize = gcptrlist->size * sizeof(gcptr); - gcptr *newitems = realloc(gcptrlist->items, nsize); + gcptr *newitems = stm_realloc(gcptrlist->items, nsize, + gcptrlist->alloc * sizeof(gcptr)); if (newitems != NULL || nsize == 0) { gcptrlist->items = newitems; @@ -177,11 +178,11 @@ //fprintf(stderr, "list %p growth to %ld items (%ld KB)\n", // gcptrlist, newalloc, newalloc * sizeof(gcptr) / 1024); - gcptr *newitems = malloc(newalloc * sizeof(gcptr)); + gcptr *newitems = stm_malloc(newalloc * sizeof(gcptr)); long i; for (i=0; isize; i++) newitems[i] = gcptrlist->items[i]; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = newitems; gcptrlist->alloc = newalloc; } diff --git a/c4/lists.h b/c4/lists.h --- a/c4/lists.h +++ b/c4/lists.h @@ -1,6 +1,8 @@ #ifndef _SRCSTM_LISTS_H #define _SRCSTM_LISTS_H +#include "dbgmem.h" + /************************************************************/ /* The g2l_xx functions ("global_to_local") are implemented as a tree, @@ -36,7 +38,7 @@ void g2l_clear(struct G2L *g2l); void g2l_delete(struct G2L *g2l); static inline void g2l_delete_not_used_any_more(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); } static inline int g2l_any_entry(struct G2L *g2l) { diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -1,7 +1,6 @@ #include "stmimpl.h" - -static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj) { 
return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); } @@ -54,6 +53,7 @@ gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_with_young_copy); + gcptrlist_delete(&d->young_weakrefs); } void stmgc_minor_collect_soon(void) @@ -100,6 +100,13 @@ return P; } +gcptr stm_allocate_immutable(size_t size, unsigned long tid) +{ + gcptr P = stm_allocate(size, tid); + P->h_tid |= GCFLAG_IMMUTABLE; + return P; +} + gcptr stmgc_duplicate(gcptr P) { size_t size = stmgc_size(P); @@ -129,7 +136,7 @@ static inline gcptr create_old_object_copy(gcptr obj) { assert(!(obj->h_tid & GCFLAG_PUBLIC)); - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(!(obj->h_tid & GCFLAG_VISITED)); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); @@ -147,14 +154,14 @@ gcptr fresh_old_copy; struct tx_descriptor *d = thread_descriptor; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* not a nursery object */ } else { /* it's a nursery object. Was it already moved? */ - if (UNLIKELY(obj->h_tid & GCFLAG_NURSERY_MOVED)) { + if (UNLIKELY(obj->h_tid & GCFLAG_MOVED)) { /* yes. Such an object can be a public object in the nursery - too (such objects are always NURSERY_MOVED). For all cases, + too (such objects are always MOVED). For all cases, we can just fix the ref. Can be stolen objects or those we already moved. */ @@ -175,7 +182,7 @@ fresh_old_copy = create_old_object_copy(obj); } - obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_tid |= GCFLAG_MOVED; obj->h_revision = (revision_t)fresh_old_copy; /* fix the original reference */ @@ -389,17 +396,17 @@ for (i = d->list_of_read_objects.size - 1; i >= limit; --i) { gcptr obj = items[i]; - if (!is_in_nursery(d, obj)) { + if (!stmgc_is_in_nursery(d, obj)) { /* non-young or visited young objects are kept */ continue; } - else if (obj->h_tid & GCFLAG_NURSERY_MOVED) { + else if (obj->h_tid & GCFLAG_MOVED) { /* visited nursery objects are kept and updated */ items[i] = (gcptr)obj->h_revision; assert(!(items[i]->h_tid & GCFLAG_STUB)); continue; } - /* Sanity check: a nursery object without the NURSERY_MOVED flag + /* Sanity check: a nursery object without the MOVED flag is necessarily a private-without-backup object, or a protected object; it cannot be a public object. 
*/ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -423,6 +430,7 @@ { assert(gcptrlist_size(&d->old_objects_to_trace) == 0); assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); spinlock_release(d->public_descriptor->collection_lock); @@ -438,7 +446,7 @@ setup_minor_collect(d); /* first do this, which asserts that some objects are private --- - which fails if they have already been GCFLAG_NURSERY_MOVED */ + which fails if they have already been GCFLAG_MOVED */ mark_public_to_young(d); mark_young_roots(d); @@ -458,6 +466,8 @@ surviving young-but-outside-the-nursery objects have been flagged with GCFLAG_OLD */ + stm_move_young_weakrefs(d); + teardown_minor_collect(d); assert(!stm_has_got_any_lock(d)); @@ -524,6 +534,7 @@ !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); + assert(gcptrlist_size(&d->young_weakrefs) == 0); assert(gcptrlist_size(&d->list_of_read_objects) >= d->num_read_objects_known_old); assert(gcptrlist_size(&d->private_from_protected) >= diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -50,7 +50,10 @@ still in the same transaction, to know that the initial \ part of the lists cannot contain young objects any more. */ \ long num_private_from_protected_known_old; \ - long num_read_objects_known_old; + long num_read_objects_known_old; \ + \ + /* Weakref support */ \ + struct GcPtrList young_weakrefs; struct tx_descriptor; /* from et.h */ @@ -64,5 +67,6 @@ size_t stmgc_size(gcptr); void stmgc_trace(gcptr, void visit(gcptr *)); void stmgc_minor_collect_soon(void); +int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj); #endif diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -23,9 +23,56 @@ { gcptr stub, obj = *pobj; if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) == - (GCFLAG_PUBLIC | GCFLAG_OLD)) + (GCFLAG_PUBLIC | GCFLAG_OLD)) return; + if (obj->h_tid & GCFLAG_IMMUTABLE) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + if (obj->h_tid & GCFLAG_PUBLIC) { + /* young public, replace with stolen old copy */ + assert(obj->h_tid & GCFLAG_MOVED); + assert(IS_POINTER(obj->h_revision)); + stub = (gcptr)obj->h_revision; + assert(!IS_POINTER(stub->h_revision)); /* not outdated */ + goto done; + } + + /* old or young protected! mark as PUBLIC */ + if (!(obj->h_tid & GCFLAG_OLD)) { + /* young protected */ + gcptr O; + + if (obj->h_tid & GCFLAG_HAS_ID) { + /* use id-copy for us */ + O = (gcptr)obj->h_original; + obj->h_tid &= ~GCFLAG_HAS_ID; + stm_copy_to_old_id_copy(obj, O); + O->h_original = 0; + } else { + O = stmgc_duplicate_old(obj); + + /* young and without original? */ + if (!(obj->h_original)) + obj->h_original = (revision_t)O; + } + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); + obj->h_revision = (revision_t)O; + + O->h_tid |= GCFLAG_PUBLIC; + /* here it is fine if it stays in read caches because + the object is immutable anyway and there are no + write_barriers allowed. */ + dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O)); + stub = O; + goto done; + } + /* old protected: */ + dprintf(("prot immutable -> public: %p\n", obj)); + obj->h_tid |= GCFLAG_PUBLIC; + + return; + } + /* we use 'all_stubs', a dictionary, in order to try to avoid duplicate stubs for the same object. 
XXX maybe it would be better to use a fast approximative cache that stays around for @@ -57,6 +104,8 @@ stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; + if (size == 0) + stub->h_tid |= GCFLAG_SMALLSTUB; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { stub->h_original = obj->h_original; @@ -158,7 +207,7 @@ /* note that we should follow h_revision at least one more step: it is necessary if L is public but young (and then - has GCFLAG_NURSERY_MOVED), but it is fine to do it more + has GCFLAG_MOVED), but it is fine to do it more generally. */ v = ACCESS_ONCE(L->h_revision); if (IS_POINTER(v)) { @@ -191,7 +240,7 @@ } L->h_revision = (revision_t)O; - L->h_tid |= GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED; + L->h_tid |= GCFLAG_PUBLIC | GCFLAG_MOVED; /* subtle: we need to remove L from the fxcache of the target thread, otherwise its read barrier might not trigger on it. It is mostly fine because it is anyway identical to O. But diff --git a/c4/stmgc.c b/c4/stmgc.c --- a/c4/stmgc.c +++ b/c4/stmgc.c @@ -10,5 +10,6 @@ #include "gcpage.c" #include "stmsync.c" #include "extra.c" +#include "weakref.c" #include "dbgmem.c" #include "fprintcolor.c" diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -23,12 +23,16 @@ #define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */ #define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID)) #define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1) -#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8)) +#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * ((1<<0) | (1<<1) | \ + (1<<2) | (1<<3) | (1<<13))) #define PREBUILT_REVISION 1 /* allocate an object out of the local nursery */ gcptr stm_allocate(size_t size, unsigned long tid); +/* allocate an object that is be immutable. it cannot be changed with + a stm_write_barrier() or after the next commit */ +gcptr stm_allocate_immutable(size_t size, unsigned long tid); /* returns a never changing hash for the object */ revision_t stm_hash(gcptr); @@ -122,6 +126,14 @@ void stm_minor_collect(void); void stm_major_collect(void); +/* weakref support: allocate a weakref object, and set it to point + weakly to 'obj'. The weak pointer offset is hard-coded to be at + 'size - WORD'. Important: stmcb_trace() must NOT trace it. + Weakrefs are *immutable*! Don't attempt to use stm_write_barrier() + on them. 
*/ +gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); + + /**************** END OF PUBLIC INTERFACE *****************/ /************************************************************/ diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -36,5 +36,6 @@ #include "steal.h" #include "stmsync.h" #include "extra.h" +#include "weakref.h" #endif diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -52,7 +52,7 @@ static void init_shadowstack(void) { struct tx_descriptor *d = thread_descriptor; - d->shadowstack = malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); + d->shadowstack = stm_malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); if (!d->shadowstack) { stm_fatalerror("out of memory: shadowstack\n"); } @@ -68,7 +68,7 @@ assert(x == END_MARKER_ON); assert(stm_shadowstack == d->shadowstack); stm_shadowstack = NULL; - free(d->shadowstack); + stm_free(d->shadowstack, sizeof(gcptr) * LENGTH_SHADOW_STACK); } void stm_set_max_aborts(int max_aborts) diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -11,11 +11,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in "et.h lists.h steal.h nursery.h gcpage.h " - "stmsync.h extra.h dbgmem.h fprintcolor.h " + "stmsync.h extra.h weakref.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in "et.c lists.c steal.c nursery.c gcpage.c " - "stmsync.c extra.c dbgmem.c fprintcolor.c".split()] + "stmsync.c extra.c weakref.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -46,7 +46,7 @@ #define PREBUILT_FLAGS ... #define PREBUILT_REVISION ... - gcptr stm_allocate(size_t size, unsigned int tid); + gcptr stm_allocate(size_t size, unsigned long tid); revision_t stm_hash(gcptr); revision_t stm_id(gcptr); _Bool stm_pointer_equal(gcptr, gcptr); @@ -69,6 +69,7 @@ void stm_abort_info_pop(long count); char *stm_inspect_abort_info(void); void stm_abort_and_retry(void); + gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj); /* extra non-public code */ void printfcolor(char *msg); @@ -129,10 +130,11 @@ #define GCFLAG_BACKUP_COPY ... #define GCFLAG_PUBLIC_TO_PRIVATE ... #define GCFLAG_WRITE_BARRIER ... - #define GCFLAG_NURSERY_MOVED ... + #define GCFLAG_MOVED ... #define GCFLAG_STUB ... #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define GCFLAG_HAS_ID ... + #define GCFLAG_IMMUTABLE ... #define ABRT_MANUAL ... 
typedef struct { ...; } page_header_t; ''') @@ -164,14 +166,18 @@ gcptr rawgetptr(gcptr obj, long index) { - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); return ((gcptr *)(obj + 1))[index]; } void rawsetptr(gcptr obj, long index, gcptr newvalue) { fprintf(stderr, "%p->[%ld] = %p\n", obj, index, newvalue); - assert(gettid(obj) > 42142 + index); + revision_t t = gettid(obj); + if (t == 42142) t++; + assert(t > 42142 + index); ((gcptr *)(obj + 1))[index] = newvalue; } @@ -232,7 +238,8 @@ gcptr pseudoprebuilt(size_t size, int tid) { - gcptr x = calloc(1, size); + gcptr x = stm_malloc(size); + memset(x, 0, size); x->h_tid = PREBUILT_FLAGS | tid; x->h_revision = PREBUILT_REVISION; return x; @@ -282,6 +289,8 @@ else { int nrefs = gettid(obj) - 42142; assert(nrefs < 100); + if (nrefs == 0) /* weakrefs */ + nrefs = 1; return sizeof(*obj) + nrefs * sizeof(gcptr); } } @@ -484,7 +493,7 @@ def oalloc_refs(nrefs): """Allocate an 'old' protected object, outside any nursery, with nrefs pointers""" - size = HDR + WORD * nrefs + size = HDR + WORD * (nrefs or 1) p = lib.stmgcpage_malloc(size) lib.memset(p, 0, size) p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER @@ -506,9 +515,9 @@ def nalloc_refs(nrefs): "Allocate a fresh object from the nursery, with nrefs pointers" - p = lib.stm_allocate(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.stm_allocate(HDR + WORD * (nrefs or 1), 42142 + nrefs) assert p.h_revision == lib.get_private_rev_num() - for i in range(nrefs): + for i in range(nrefs or 1): assert rawgetptr(p, i) == ffi.NULL # must already be zero-filled return p @@ -524,9 +533,9 @@ def palloc_refs(nrefs, prehash=None): "Get a ``prebuilt'' object with nrefs pointers." if prehash is None: - p = lib.pseudoprebuilt(HDR + WORD * nrefs, 42142 + nrefs) + p = lib.pseudoprebuilt(HDR + WORD * (nrefs or 1), 42142 + nrefs) else: - p = lib.pseudoprebuilt_with_hash(HDR + WORD * nrefs, + p = lib.pseudoprebuilt_with_hash(HDR + WORD * (nrefs or 1), 42142 + nrefs, prehash) return p @@ -577,17 +586,20 @@ def delegate(p1, p2): assert classify(p1) == "public" assert classify(p2) == "public" + assert lib.gettid(p1) != 42 + HDR and lib.gettid(p2) == lib.gettid(p1) p1.h_revision = ffi.cast("revision_t", p2) p1.h_tid |= GCFLAG_PUBLIC_TO_PRIVATE if p1.h_tid & GCFLAG_PREBUILT_ORIGINAL: lib.stm_add_prebuilt_root(p1) - -def delegate_original(p1, p2): - assert p1.h_original == 0 + # no h_original or it is a prebuilt with a specified hash in h_original assert p2.h_original == 0 assert p1 != p2 - p2.h_original = ffi.cast("revision_t", p1) - + assert p1.h_tid & GCFLAG_OLD + assert p2.h_tid & GCFLAG_OLD + if (p1.h_original == 0) or (p1.h_tid & GCFLAG_PREBUILT_ORIGINAL): + p2.h_original = ffi.cast("revision_t", p1) + else: + p2.h_original = p1.h_original def make_public(p1): """Hack at an object returned by oalloc() to force it public.""" @@ -643,9 +655,9 @@ return "stub" else: # public objects usually never live in the nursery, but - # if stealing makes one, it has GCFLAG_NURSERY_MOVED. + # if stealing makes one, it has GCFLAG_MOVED. 
if lib.in_nursery(p): - assert p.h_tid & GCFLAG_NURSERY_MOVED + assert p.h_tid & GCFLAG_MOVED assert not (p.h_revision & 1) # "is a pointer" return "public" if backup: @@ -684,5 +696,8 @@ should_break_transaction = lib.stm_should_break_transaction - +WEAKREF_SIZE = HDR + WORD +WEAKREF_TID = 42142 + + nrb_protected = ffi.cast("gcptr", -1) diff --git a/c4/test/test_abort.py b/c4/test/test_abort.py --- a/c4/test/test_abort.py +++ b/c4/test/test_abort.py @@ -37,7 +37,7 @@ assert seen == range(5000) def test_global_to_local_copies(): - p1 = palloc(HDR) + p1 = palloc(HDR + WORD) # @perform_transaction def run(retry_counter): diff --git a/c4/test/test_atomic.py b/c4/test/test_atomic.py --- a/c4/test/test_atomic.py +++ b/c4/test/test_atomic.py @@ -20,7 +20,7 @@ def test_set_transaction_length(): lib.stm_set_transaction_length(5) # breaks after 4 read-or-writes - plist = [palloc(HDR) for i in range(6)] + plist = [palloc(HDR + WORD) for i in range(6)] should_br = ['?'] * (len(plist) + 1) # @perform_transaction diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -45,7 +45,7 @@ assert classify(p) == "protected" def test_private_with_backup(): - p = nalloc(HDR) + p = nalloc(HDR + WORD) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() r2 = lib.get_private_rev_num() @@ -80,15 +80,12 @@ def test_prebuilt_is_public(): p = palloc(HDR) assert p.h_revision == 1 - assert p.h_tid == lib.gettid(p) | (GCFLAG_OLD | - GCFLAG_VISITED | - GCFLAG_PUBLIC | - GCFLAG_PREBUILT_ORIGINAL) + assert p.h_tid == lib.gettid(p) | lib.PREBUILT_FLAGS assert classify(p) == "public" assert lib.stm_id(p) != 0 def test_prebuilt_object_to_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) flags = p.h_tid assert (flags & GCFLAG_PUBLIC_TO_PRIVATE) == 0 pid = lib.stm_id(p) @@ -158,7 +155,7 @@ assert p1.h_revision == int(ffi.cast("revision_t", p3)) # shortcutted def test_read_barrier_public_to_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) pid = lib.stm_id(p) p2 = lib.stm_write_barrier(p) assert p2 != p @@ -173,7 +170,7 @@ assert pid == lib.stm_id(p2) def test_read_barrier_handle_protected(): - p = palloc(HDR) + p = palloc(HDR + WORD) p2 = lib.stm_write_barrier(p) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -188,7 +185,7 @@ assert list_of_read_objects() == [p2] def test_read_barrier_handle_private(): - p = palloc(HDR) + p = palloc(HDR + WORD) p2 = lib.stm_write_barrier(p) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -243,6 +240,21 @@ assert getptr(pr, 0) != r assert getptr(pr, 0) == r2 + # the following shouldn't be done + # because pw was not saved. 
Just + # here to check that pw gets removed + # from old_objects_to_trace when not found + # on the root stack anymore + rawsetptr(pw, 0, q) + lib.stm_push_root(q) + minor_collect() + q2 = lib.stm_pop_root() + check_nursery_free(q) + pr = lib.stm_read_barrier(p) + assert q != q2 + assert getptr(pr, 0) == q + assert getptr(pr, 0) != q2 + def test_write_barrier_after_minor_collect_young_to_old(): py.test.skip("should fail now") p = nalloc_refs(1) @@ -287,14 +299,14 @@ assert porig == ffi.NULL # } - p1 = oalloc(HDR) + p1 = oalloc(HDR + WORD) p1id = lib.stm_id(p1) p1r = lib.stm_read_barrier(p1) assert lib.stm_id(p1r) == p1id p1w = lib.stm_write_barrier(p1) assert lib.stm_id(p1w) == p1id - p2 = oalloc(HDR) + p2 = oalloc(HDR + WORD) p2w = lib.stm_write_barrier(p2) p2id = lib.stm_id(p2w) assert p2id == lib.stm_id(p2) @@ -390,7 +402,7 @@ assert p2 == lib.stm_read_barrier(p) assert p2 != plist[-1] # p2 is a public moved-out-of-nursery assert plist[-1].h_tid & GCFLAG_PUBLIC - assert plist[-1].h_tid & GCFLAG_NURSERY_MOVED + assert plist[-1].h_tid & GCFLAG_MOVED assert plist[-1].h_revision == int(ffi.cast("revision_t", p2)) assert classify(p2) == "public" r.set(3) @@ -566,8 +578,8 @@ pid = [] rid = [] From noreply at buildbot.pypy.org Sat Jul 27 16:35:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:35:56 +0200 (CEST) Subject: [pypy-commit] stmgc copy-over-original2: Close the merged branch Message-ID: <20130727143556.27EB11C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: copy-over-original2 Changeset: r465:4c2ee60e610d Date: 2013-07-27 16:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/4c2ee60e610d/ Log: Close the merged branch From noreply at buildbot.pypy.org Sat Jul 27 16:35:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:35:57 +0200 (CEST) Subject: [pypy-commit] stmgc weakref: Close the merged branch Message-ID: <20130727143557.4F34B1C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: weakref Changeset: r466:9ddc54926be9 Date: 2013-07-27 16:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/9ddc54926be9/ Log: Close the merged branch From noreply at buildbot.pypy.org Sat Jul 27 16:44:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:44:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: Print 'Test OK!' when exiting normally, to be sure Message-ID: <20130727144408.D492B1C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r467:c528da482152 Date: 2013-07-27 16:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/c528da482152/ Log: Print 'Test OK!' 
when exiting normally, to be sure diff --git a/c4/demo_random.c b/c4/demo_random.c --- a/c4/demo_random.c +++ b/c4/demo_random.c @@ -765,6 +765,7 @@ newthread(demo, NULL); } } - + + printf("Test OK!\n"); return 0; } From noreply at buildbot.pypy.org Sat Jul 27 16:51:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:51:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Import stmgc/c528da482152 Message-ID: <20130727145118.46CB71C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65714:299d3aac450a Date: 2013-07-27 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/299d3aac450a/ Log: Import stmgc/c528da482152 diff --git a/rpython/translator/stm/src_stm/dbgmem.c b/rpython/translator/stm/src_stm/dbgmem.c --- a/rpython/translator/stm/src_stm/dbgmem.c +++ b/rpython/translator/stm/src_stm/dbgmem.c @@ -9,7 +9,7 @@ #ifdef _GC_DEBUG /************************************************************/ -#define MMAP_TOTAL 671088640 /* 640MB */ +#define MMAP_TOTAL 1280*1024*1024 /* 1280MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; static char *zone_start, *zone_current = NULL, *zone_end = NULL; @@ -71,6 +71,10 @@ void stm_free(void *p, size_t sz) { + if (p == NULL) { + assert(sz == 0); + return; + } assert(((intptr_t)((char *)p + sz) & (PAGE_SIZE-1)) == 0); size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; @@ -84,6 +88,14 @@ _stm_dbgmem(p, sz, PROT_NONE); } +void *stm_realloc(void *p, size_t newsz, size_t oldsz) +{ + void *r = stm_malloc(newsz); + memcpy(r, p, oldsz < newsz ? oldsz : newsz); + stm_free(p, oldsz); + return r; +} + int _stm_can_access_memory(char *p) { long base = ((char *)p - zone_start) / PAGE_SIZE; diff --git a/rpython/translator/stm/src_stm/dbgmem.h b/rpython/translator/stm/src_stm/dbgmem.h --- a/rpython/translator/stm/src_stm/dbgmem.h +++ b/rpython/translator/stm/src_stm/dbgmem.h @@ -7,6 +7,7 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); +void *stm_realloc(void *, size_t, size_t); int _stm_can_access_memory(char *); void assert_cleared(char *, size_t); @@ -14,6 +15,7 @@ #define stm_malloc(sz) malloc(sz) #define stm_free(p,sz) free(p) +#define stm_realloc(p,newsz,oldsz) realloc(p,newsz) #define assert_cleared(p,sz) do { } while(0) #endif diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -146,7 +146,7 @@ gcptr P_prev = P; P = (gcptr)v; assert((P->h_tid & GCFLAG_PUBLIC) || - (P_prev->h_tid & GCFLAG_NURSERY_MOVED)); + (P_prev->h_tid & GCFLAG_MOVED)); v = ACCESS_ONCE(P->h_revision); @@ -238,7 +238,7 @@ add_in_recent_reads_cache: /* The risks are that the following assert fails, because the flag was added just now by a parallel thread during stealing... 
*/ - /*assert(!(P->h_tid & GCFLAG_NURSERY_MOVED));*/ + /*assert(!(P->h_tid & GCFLAG_MOVED));*/ fxcache_add(&d->recent_reads_cache, P); return P; @@ -281,7 +281,7 @@ */ if (P->h_tid & GCFLAG_PUBLIC) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) { P = (gcptr)P->h_revision; assert(P->h_tid & GCFLAG_PUBLIC); @@ -413,7 +413,7 @@ while (v = P->h_revision, IS_POINTER(v)) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) dprintf(("nursery_moved ")); if (v & 2) @@ -510,7 +510,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { assert(R->h_tid & GCFLAG_PUBLIC); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); #ifdef _GC_DEBUG wlog_t *entry; @@ -570,6 +570,13 @@ gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert((P->h_tid & GCFLAG_STUB) || + stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); + /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a + risk of overrunning the object later in gcpage.c when copying a stub + over it. However such objects are so small that they contain no field + at all, and so no write barrier should occur on them. */ + if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into @@ -606,7 +613,7 @@ Add R into the list 'public_with_young_copy', unless W is actually an old object, in which case we need to record W. */ - if (R->h_tid & GCFLAG_NURSERY_MOVED) + if (R->h_tid & GCFLAG_MOVED) { /* Bah, the object turned into this kind of stub, possibly while we were waiting for the collection_lock, because it @@ -696,8 +703,8 @@ continue; } } - else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) - == (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) + else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_MOVED)) + == (GCFLAG_PUBLIC | GCFLAG_MOVED)) { /* such an object is identical to the one it points to (stolen protected young object with h_revision pointing @@ -970,6 +977,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + dprintf(("acquire_locks\n")); assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); @@ -982,6 +990,7 @@ revision_t v; retry: assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); v = ACCESS_ONCE(R->h_revision); if (IS_POINTER(v)) /* "has a more recent revision" */ { @@ -1014,7 +1023,7 @@ static void CancelLocks(struct tx_descriptor *d) { wlog_t *item; - + dprintf(("cancel_locks\n")); if (!g2l_any_entry(&d->public_to_private)) return; @@ -1107,7 +1116,7 @@ assert(!(L->h_tid & GCFLAG_VISITED)); assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(L->h_tid & GCFLAG_MOVED)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -1119,7 +1128,9 @@ gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB + | GCFLAG_SMALLSTUB | GCFLAG_OLD; + dprintf(("et.c: stm_stub_malloc -> %p\n", stub)); stub->h_revision = ((revision_t)L) | 2; assert(!(L->h_tid & GCFLAG_HAS_ID)); @@ -1154,7 +1165,7 @@ assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -1249,7 +1260,7 @@ assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); P->h_tid |= GCFLAG_PUBLIC; assert(!(P->h_tid & GCFLAG_HAS_ID)); - if (!(P->h_tid 
& GCFLAG_OLD)) P->h_tid |= GCFLAG_NURSERY_MOVED; + if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_MOVED; /* P becomes a public outdated object. It may create an exception documented in doc-objects.txt: a public but young object. It's still fine because it should only be seen by @@ -1282,7 +1293,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - + dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1366,6 +1377,7 @@ d->active = 2; d->reads_size_limit_nonatomic = 0; update_reads_size_limit(d); + dprintf(("make_inevitable(%p)\n", d)); } static revision_t acquire_inev_mutex_and_mark_global_cur_time( diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -26,7 +26,11 @@ * * GCFLAG_OLD is set on old objects. * - * GCFLAG_VISITED is used temporarily during major collections. + * GCFLAG_VISITED and GCFLAG_MARKED are used temporarily during major + * collections. The objects are MARKED|VISITED as soon as they have been + * added to 'objects_to_trace', and so will be or have been traced. The + * objects are only MARKED if their memory must be kept alive, but (so far) + * we found that tracing them is not useful. * * GCFLAG_PUBLIC is set on public objects. * @@ -47,7 +51,7 @@ * the list 'old_objects_to_trace'; it is set again at the next minor * collection. * - * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. + * GCFLAG_MOVED is used temporarily during minor/major collections. * * GCFLAG_STUB is set for debugging on stub objects made by stealing or * by major collections. 
'p_stub->h_revision' might be a value @@ -68,16 +72,20 @@ static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_NURSERY_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; +static const revision_t GCFLAG_SMALLSTUB /*debug*/ = STM_FIRST_GCFLAG << 12; +static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13; +/* warning, the last flag available is "<< 15" on 32-bit */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ + GCFLAG_MARKED | \ GCFLAG_PREBUILT_ORIGINAL | \ GCFLAG_OLD | \ GCFLAG_PUBLIC) @@ -88,12 +96,14 @@ "PREBUILT_ORIGINAL", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ - "NURSERY_MOVED", \ + "MOVED", \ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ - "HAS_ID", \ - "IMMUTABLE", \ + "HAS_ID", \ + "IMMUTABLE", \ + "SMALLSTUB", \ + "MARKED", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -132,17 +132,16 @@ _Bool stm_pointer_equal(gcptr p1, gcptr p2) { - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* if p1 or p2 is NULL (but not both, because they are different - pointers), then return 0 */ - if (p1 == NULL || p2 == NULL) - return 0; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); + if (p1 != NULL && p2 != NULL) { + /* resolve h_original, but only if !PREBUILT_ORIGINAL */ + if (p1->h_original && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p1 = (gcptr)p1->h_original; + } + if (p2->h_original && !(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p2 = (gcptr)p2->h_original; + } + } + return (p1 == p2); } /************************************************************/ diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -213,157 +213,229 @@ static struct GcPtrList objects_to_trace; -static void keep_original_alive(gcptr obj) +static gcptr copy_over_original(gcptr obj, gcptr id_copy) { - /* keep alive the original of a visited object */ - gcptr id_copy = (gcptr)obj->h_original; - /* prebuilt original objects may have a predifined - hash in h_original */ - if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - /* see fix_outdated() */ - if (!(id_copy->h_tid & GCFLAG_VISITED)) { - id_copy->h_tid |= GCFLAG_VISITED; + assert(obj != id_copy); + assert(id_copy == (gcptr)obj->h_original); + assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ - /* XXX: may not always need tracing? 
*/ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); - } - } - else { - /* prebuilt originals won't get collected anyway - and if they are not reachable in any other way, - we only ever need their location, not their content */ + /* check a few flags */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + + assert(id_copy->h_tid & GCFLAG_PUBLIC); + assert(!(id_copy->h_tid & GCFLAG_BACKUP_COPY)); + + /* id_copy may be a stub, but in this case, as the original, it + should have been allocated with a big enough chunk of memory. + Also, obj itself might be a stub. */ + assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); + if (!(id_copy->h_tid & GCFLAG_STUB) && !(obj->h_tid & GCFLAG_STUB)) { + assert(stmgc_size(id_copy) == stmgc_size(obj)); + } + + /* add the MOVED flag to 'obj' */ + obj->h_tid |= GCFLAG_MOVED; + + /* copy the object's content */ + size_t objsize; + if (obj->h_tid & GCFLAG_STUB) + objsize = sizeof(struct stm_stub_s); + else { + objsize = stmgc_size(obj); + assert(objsize > sizeof(struct stm_stub_s) - WORD); + } + dprintf(("copy %p over %p (%zd bytes)\n", obj, id_copy, objsize)); + memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); + + /* copy the object's h_revision number */ + id_copy->h_revision = obj->h_revision; + + /* copy the STUB flag */ + id_copy->h_tid &= ~GCFLAG_STUB; + id_copy->h_tid |= (obj->h_tid & GCFLAG_STUB); + + return id_copy; +} + +static void visit_nonpublic(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* Visit a protected or private object. 'gcp' must be either NULL or + point to the thread that has got the object. This 'gcp' is only an + optimization: it lets us trace (most) private/protected objects + and replace pointers to public objects in them with pointers to + private/protected objects if they are the most recent ones, + provided they belong to the same thread. + */ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_HAS_ID)); + assert(!(obj->h_tid & GCFLAG_SMALLSTUB)); + assert(!(obj->h_tid & GCFLAG_MOVED)); + + if (obj->h_tid & GCFLAG_VISITED) + return; /* already visited */ + + obj->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + gcptrlist_insert2(&objects_to_trace, obj, (gcptr)gcp); + + obj = (gcptr)obj->h_original; + if (obj != NULL) + obj->h_tid |= GCFLAG_MARKED; +} + +static gcptr visit_public(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* The goal is to walk to the most recent copy, then copy its + content back into the h_original, and finally returns this + h_original. Or, if gcp != NULL and the most recent copy is + protected by precisely 'gcp', then we return it instead. + */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + + gcptr original; + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + original = (gcptr)obj->h_original; + /* the h_original may be protected, or private_from_protected, + in some cases. Then we can't use it. We'll use the most + recent h_revision which is public. */ + if (!(original->h_tid & GCFLAG_PUBLIC)) { + original->h_tid |= GCFLAG_MARKED; + original = NULL; } } + else + original = obj; + + /* the original object must not be a small stub. 
*/ + assert(original == NULL || !(original->h_tid & GCFLAG_SMALLSTUB)); + + /* if 'original' was already visited, we are done */ + if (original != NULL && original->h_tid & GCFLAG_VISITED) + return original; + + /* walk to the head of the chained list */ + while (IS_POINTER(obj->h_revision)) { + if (!(obj->h_revision & 2)) { + obj = (gcptr)obj->h_revision; + assert(obj->h_tid & GCFLAG_PUBLIC); + continue; + } + + /* it's a stub: check the current stealing status */ + assert(obj->h_tid & GCFLAG_STUB); + gcptr obj2 = (gcptr)(obj->h_revision - 2); + + if (obj2->h_tid & GCFLAG_PUBLIC) { + /* the stub target itself was stolen, so is public now. + Continue looping from there. */ + obj = obj2; + continue; + } + + if (obj2->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* the stub target is a private_from_protected. */ + gcptr obj3 = (gcptr)obj2->h_revision; + if (obj3->h_tid & GCFLAG_PUBLIC) { + assert(!(obj3->h_tid & GCFLAG_BACKUP_COPY)); + /* the backup copy was stolen and is now a regular + public object. */ + obj = obj3; + continue; + } + else { + /* the backup copy was not stolen. Ignore this pair + obj2/obj3, and the head of the public chain is obj. + The pair obj2/obj3 was or will be handled by + mark_all_stack_roots(). */ + assert(obj3->h_tid & GCFLAG_BACKUP_COPY); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + else { + /* the stub target is just a protected object. + The head of the public chain is obj. We have to + explicitly keep obj2 alive. */ + assert(!IS_POINTER(obj2->h_revision)); + visit_nonpublic(obj2, STUB_THREAD(obj)); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + + /* at this point, 'obj' contains the most recent revision which is + public. */ + if (original == NULL) { + original = obj; + if (original->h_tid & GCFLAG_VISITED) + return original; + } + else if (obj != original) { + /* copy obj over original */ + copy_over_original(obj, original); + } + + /* return this original */ + original->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + if (!(original->h_tid & GCFLAG_STUB)) + gcptrlist_insert2(&objects_to_trace, original, NULL); + return original; } -static void visit(gcptr *pobj); +static struct tx_public_descriptor *visit_protected_gcp; -gcptr stmgcpage_visit(gcptr obj) +static void visit_take_protected(gcptr *pobj) { - visit(&obj); - return obj; -} - -static void visit(gcptr *pobj) -{ + /* Visits '*pobj', marking it as surviving and possibly adding it to + objects_to_trace. Fixes *pobj to point to the exact copy that + survived. This function will replace *pobj with a protected + copy if it belongs to the thread 'visit_protected_gcp', so the + latter must be initialized before any call! 
+ */ gcptr obj = *pobj; if (obj == NULL) return; - restart: - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - keep_original_alive(obj); - } - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - gcptr prev_obj = obj; - - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub: keep it if it points to a protected version, - because we need to keep the effect of stealing if it is - later accessed by the wrong thread. If it points to a - public object (possibly outdated), we can ignore the stub. - */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC)) { - prev_obj->h_tid |= GCFLAG_VISITED; - keep_original_alive(prev_obj); - - assert(*pobj == prev_obj); - /* recursion, but should be only once */ - obj = stmgcpage_visit(obj); - assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj) | 2; - return; - } - } - - if (!(obj->h_revision & 3)) { - /* obj is neither a stub nor a most recent revision: - completely ignore obj->h_revision */ - - obj = (gcptr)obj->h_revision; - assert(obj->h_tid & GCFLAG_PUBLIC); - prev_obj->h_revision = (revision_t)obj; - } - *pobj = obj; - goto restart; - } - else if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + /* 'obj' is a private or protected copy. 
*/ + visit_nonpublic(obj, visit_protected_gcp); } else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - - if (obj->h_original && (gcptr)obj->h_original != B) { - /* if B is original, it will be visited anyway */ - assert(obj->h_original == B->h_original); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - keep_original_alive(obj); - } - - obj->h_tid |= GCFLAG_VISITED; - B->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert2(&objects_to_trace, obj, B); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - pobj = (gcptr *)&B->h_revision; - obj = *pobj; - goto restart; - } + *pobj = visit_public(obj, visit_protected_gcp); } } - -static void visit_keep(gcptr obj) +gcptr stmgcpage_visit(gcptr obj) { - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - if (IS_POINTER(obj->h_revision)) { - assert(!(obj->h_revision & 2)); - visit((gcptr *)&obj->h_revision); - } - keep_original_alive(obj); + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + visit_nonpublic(obj, NULL); } + else { + obj = visit_public(obj, NULL); + } + return obj; } static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { + visit_protected_gcp = + (struct tx_public_descriptor *)gcptrlist_pop(&objects_to_trace); gcptr obj = gcptrlist_pop(&objects_to_trace); - stmgc_trace(obj, &visit); + stmgc_trace(obj, &visit_take_protected); } + visit_protected_gcp = NULL; } static void mark_prebuilt_roots(void) @@ -371,18 +443,20 @@ /* Note about prebuilt roots: 'stm_prebuilt_gcroots' is a list that contains all the ones that have been modified. Because they are themselves not in any page managed by this file, their - GCFLAG_VISITED will not be removed at the end of the current - collection. This is fine because the base object cannot contain - references to the heap. So we decided to systematically set - GCFLAG_VISITED on prebuilt objects. */ + GCFLAG_VISITED is not removed at the end of the current + collection. That's why we remove it here. GCFLAG_MARKED is not + relevant for prebuilt objects, but we avoid objects with MARKED + but not VISITED, which trigger some asserts. 
*/ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; - gcptr obj; + gcptr obj, obj2; for (; pobj != pend; pobj++) { obj = *pobj; + obj->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - assert(IS_POINTER(obj->h_revision)); - visit((gcptr *)&obj->h_revision); + + obj2 = visit_public(obj, NULL); + assert(obj2 == obj); /* it is its own original */ } } @@ -396,7 +470,7 @@ if (((revision_t)item) & ~((revision_t)END_MARKER_OFF | (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ - visit(root); + visit_take_protected(root); dprintf(("visit stack root: %p -> %p\n", item, *root)); } else if (item == END_MARKER_OFF) { @@ -409,15 +483,19 @@ static void mark_all_stack_roots(void) { struct tx_descriptor *d; + struct GcPtrList new_public_to_private; + memset(&new_public_to_private, 0, sizeof(new_public_to_private)); + for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); + visit_protected_gcp = d->public_descriptor; /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); /* the thread-local object */ - visit(d->thread_local_obj_ref); - visit(&d->old_thread_local_obj); + visit_take_protected(d->thread_local_obj_ref); + visit_take_protected(&d->old_thread_local_obj); /* the current transaction's private copies of public objects */ wlog_t *item; @@ -427,37 +505,50 @@ gcptr R = item->addr; gcptr L = item->val; - /* Objects that were not visited yet must have the PUB_TO_PRIV - flag. Except if that transaction will abort anyway, then it - may be removed from a previous major collection that didn't - fix the PUB_TO_PRIV because the transaction was going to - abort anyway: - 1. minor_collect before major collect (R->L, R is outdated, abort) - 2. major collect removes flag - 3. major collect again, same thread, no time to abort - 4. flag still removed - */ - assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, - R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - visit_keep(R); + /* we visit the public object R. Must keep a public object + here, so we pass NULL as second argument. */ + gcptr new_R = visit_public(R, NULL); + assert(new_R->h_tid & GCFLAG_PUBLIC); + + if (new_R != R) { + /* we have to update the key in public_to_private, which + can only be done by deleting the existing key and + (after the loop) re-inserting the new key. */ + G2L_LOOP_DELETE(item); + gcptrlist_insert2(&new_public_to_private, new_R, L); + } + + /* we visit the private copy L --- which at this point + should be private, possibly private_from_protected, + so visit() should return the same private copy */ if (L != NULL) { - /* minor collection found R->L in public_to_young - and R was modified. It then sets item->val to NULL and wants - to abort later. */ - revision_t v = L->h_revision; - visit_keep(L); - /* a bit of custom logic here: if L->h_revision used to - point exactly to R, as set by stealing, then we must - keep this property, even though visit_keep(L) might - decide it would be better to make it point to a more - recent copy. 
*/ - if (v == (revision_t)R) { - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - L->h_revision = v; /* restore */ - } + visit_nonpublic(L, visit_protected_gcp); } + } G2L_LOOP_END; + /* reinsert to real pub_to_priv */ + long i, size = new_public_to_private.size; + gcptr *items = new_public_to_private.items; + for (i = 0; i < size; i += 2) { + g2l_insert(&d->public_to_private, items[i], items[i + 1]); + } + gcptrlist_clear(&new_public_to_private); + + /* the current transaction's private copies of protected objects */ + items = d->private_from_protected.items; + for (i = d->private_from_protected.size - 1; i >= 0; i--) { + gcptr obj = items[i]; + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + visit_nonpublic(obj, visit_protected_gcp); + + gcptr backup_obj = (gcptr)obj->h_revision; + if (!(backup_obj->h_tid & GCFLAG_PUBLIC)) + visit_nonpublic(backup_obj, visit_protected_gcp); + else + obj->h_revision = (revision_t)visit_public(backup_obj, NULL); + } + /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); @@ -473,27 +564,16 @@ assert(gcptrlist_size(&d->private_from_protected) == d->num_private_from_protected_known_old); } + + visit_protected_gcp = NULL; + gcptrlist_delete(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) { long i; gcptr *items; - - /* It can occur that 'private_from_protected' contains an object that - * has not been visited at all (maybe only in inevitable - * transactions). - */ - items = d->private_from_protected.items; - for (i = d->private_from_protected.size - 1; i >= 0; i--) { - gcptr obj = items[i]; - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - - if (!(obj->h_tid & GCFLAG_VISITED)) { - /* forget 'obj' */ - items[i] = items[--d->private_from_protected.size]; - } - } + assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do * more here. @@ -516,21 +596,29 @@ items = d->list_of_read_objects.items; for (i = d->list_of_read_objects.size - 1; i >= 0; --i) { gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_STUB)); - /* Warning: in case the object listed is outdated and has been - replaced with a more recent revision, then it might be the - case that obj->h_revision doesn't have GCFLAG_VISITED, but - just removing it is very wrong --- we want 'd' to abort. - */ - if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + if (obj->h_tid & GCFLAG_MOVED) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(IS_POINTER(obj->h_original)); + obj = (gcptr)obj->h_original; + items[i] = obj; + } + else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* Warning: in case the object listed is outdated and has been + replaced with a more recent revision, then it might be the + case that obj->h_revision doesn't have GCFLAG_VISITED, but + just removing it is very wrong --- we want 'd' to abort. + */ /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; + + /* the backup-ptr should already be updated: */ + assert(!(obj->h_tid & GCFLAG_MOVED)); } revision_t v = obj->h_revision; - if (IS_POINTER(v)) { + if ((obj->h_tid & GCFLAG_STUB) || IS_POINTER(v)) { /* has a more recent revision. Oups. 
*/ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); @@ -572,7 +660,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - + assert(!(item->addr->h_tid & GCFLAG_MOVED)); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ @@ -611,7 +699,9 @@ and the flag is removed; other locations are marked as free. */ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) break; /* first object that stays alive */ p = (gcptr)(((char *)p) + obj_size); } @@ -621,8 +711,10 @@ surviving_pages = lpage; p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); mc_total_in_use += obj_size; } else { @@ -648,6 +740,7 @@ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { assert(!(p->h_tid & GCFLAG_VISITED)); + assert(!(p->h_tid & GCFLAG_MARKED)); if (p->h_tid != DEBUG_WORD(0xDD)) { dprintf(("| freeing %p (with page %p)\n", p, lpage)); } @@ -677,8 +770,10 @@ G2L_LOOP_FORWARD(gcp->nonsmall_objects, item) { gcptr p = item->addr; - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); } else { G2L_LOOP_DELETE(item); diff --git a/rpython/translator/stm/src_stm/lists.c b/rpython/translator/stm/src_stm/lists.c --- a/rpython/translator/stm/src_stm/lists.c +++ b/rpython/translator/stm/src_stm/lists.c @@ -19,7 +19,7 @@ void g2l_delete(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); memset(g2l, 0, sizeof(struct G2L)); } @@ -57,7 +57,7 @@ long alloc = g2l->raw_end - g2l->raw_start; long newalloc = (alloc + extra + (alloc >> 2) + 31) & ~15; //fprintf(stderr, "growth: %ld\n", newalloc); - char *newitems = malloc(newalloc); + char *newitems = stm_malloc(newalloc); newg2l.raw_start = newitems; newg2l.raw_current = newitems; newg2l.raw_end = newitems + newalloc; @@ -66,7 +66,7 @@ { g2l_insert(&newg2l, item->addr, item->val); } G2L_LOOP_END; - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); *g2l = newg2l; } @@ -152,7 +152,7 @@ //fprintf(stderr, "list %p deleted (%ld KB)\n", //gcptrlist, gcptrlist->alloc * sizeof(gcptr) / 1024); gcptrlist->size = 0; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = NULL; gcptrlist->alloc = 0; } @@ -163,7 +163,8 @@ return; size_t nsize = gcptrlist->size * sizeof(gcptr); - gcptr *newitems = realloc(gcptrlist->items, nsize); + gcptr *newitems = stm_realloc(gcptrlist->items, nsize, + gcptrlist->alloc * sizeof(gcptr)); if (newitems != NULL || nsize == 0) { gcptrlist->items = newitems; @@ -178,11 +179,11 @@ //fprintf(stderr, "list %p growth to %ld items (%ld KB)\n", // gcptrlist, newalloc, newalloc * sizeof(gcptr) / 1024); - gcptr *newitems = malloc(newalloc * sizeof(gcptr)); + gcptr *newitems = stm_malloc(newalloc * sizeof(gcptr)); long i; for (i=0; isize; i++) newitems[i] = gcptrlist->items[i]; - free(gcptrlist->items); + 
stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = newitems; gcptrlist->alloc = newalloc; } diff --git a/rpython/translator/stm/src_stm/lists.h b/rpython/translator/stm/src_stm/lists.h --- a/rpython/translator/stm/src_stm/lists.h +++ b/rpython/translator/stm/src_stm/lists.h @@ -2,6 +2,8 @@ #ifndef _SRCSTM_LISTS_H #define _SRCSTM_LISTS_H +#include "dbgmem.h" + /************************************************************/ /* The g2l_xx functions ("global_to_local") are implemented as a tree, @@ -37,7 +39,7 @@ void g2l_clear(struct G2L *g2l); void g2l_delete(struct G2L *g2l); static inline void g2l_delete_not_used_any_more(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); } static inline int g2l_any_entry(struct G2L *g2l) { diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -137,7 +137,7 @@ static inline gcptr create_old_object_copy(gcptr obj) { assert(!(obj->h_tid & GCFLAG_PUBLIC)); - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(!(obj->h_tid & GCFLAG_VISITED)); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); @@ -160,9 +160,9 @@ } else { /* it's a nursery object. Was it already moved? */ - if (UNLIKELY(obj->h_tid & GCFLAG_NURSERY_MOVED)) { + if (UNLIKELY(obj->h_tid & GCFLAG_MOVED)) { /* yes. Such an object can be a public object in the nursery - too (such objects are always NURSERY_MOVED). For all cases, + too (such objects are always MOVED). For all cases, we can just fix the ref. Can be stolen objects or those we already moved. */ @@ -183,7 +183,7 @@ fresh_old_copy = create_old_object_copy(obj); } - obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_tid |= GCFLAG_MOVED; obj->h_revision = (revision_t)fresh_old_copy; /* fix the original reference */ @@ -233,8 +233,23 @@ assert(items[i]->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(items[i]->h_revision)); + /* if items[i] is young, move it, update the pointer, and + schedule the object for later consideration by + visit_all_outside_objects() (which will for example ensure + that the WRITE_BARRIER flag is added to it). + */ visit_if_young(&items[i]); + /* the backup copy is always allocated outside the nursery, + but we have to trace it as well, as it may contain its own + young pointers. + + but only once: if the transaction was running for long enough + to have num_private_from_protected_known_old > 0, then the + backup copies of known-old objects have already been traced + in a previous minor collection, and as they are read-only, + they cannot contain young pointers any more. + */ stmgc_trace((gcptr)items[i]->h_revision, &visit_if_young); } @@ -386,13 +401,13 @@ /* non-young or visited young objects are kept */ continue; } - else if (obj->h_tid & GCFLAG_NURSERY_MOVED) { + else if (obj->h_tid & GCFLAG_MOVED) { /* visited nursery objects are kept and updated */ items[i] = (gcptr)obj->h_revision; assert(!(items[i]->h_tid & GCFLAG_STUB)); continue; } - /* Sanity check: a nursery object without the NURSERY_MOVED flag + /* Sanity check: a nursery object without the MOVED flag is necessarily a private-without-backup object, or a protected object; it cannot be a public object. 
*/ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -432,7 +447,7 @@ setup_minor_collect(d); /* first do this, which asserts that some objects are private --- - which fails if they have already been GCFLAG_NURSERY_MOVED */ + which fails if they have already been GCFLAG_MOVED */ mark_public_to_young(d); mark_young_roots(d); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -4cad3aa5a20b +c528da482152 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -31,7 +31,7 @@ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); if (obj->h_tid & GCFLAG_PUBLIC) { /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_NURSERY_MOVED); + assert(obj->h_tid & GCFLAG_MOVED); assert(IS_POINTER(obj->h_revision)); stub = (gcptr)obj->h_revision; assert(!IS_POINTER(stub->h_revision)); /* not outdated */ @@ -56,7 +56,7 @@ if (!(obj->h_original)) obj->h_original = (revision_t)O; } - obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); obj->h_revision = (revision_t)O; O->h_tid |= GCFLAG_PUBLIC; @@ -105,6 +105,8 @@ stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; + if (size == 0) + stub->h_tid |= GCFLAG_SMALLSTUB; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { stub->h_original = obj->h_original; @@ -206,7 +208,7 @@ /* note that we should follow h_revision at least one more step: it is necessary if L is public but young (and then - has GCFLAG_NURSERY_MOVED), but it is fine to do it more + has GCFLAG_MOVED), but it is fine to do it more generally. */ v = ACCESS_ONCE(L->h_revision); if (IS_POINTER(v)) { @@ -239,7 +241,7 @@ } L->h_revision = (revision_t)O; - L->h_tid |= GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED; + L->h_tid |= GCFLAG_PUBLIC | GCFLAG_MOVED; /* subtle: we need to remove L from the fxcache of the target thread, otherwise its read barrier might not trigger on it. It is mostly fine because it is anyway identical to O. 
But diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -24,7 +24,8 @@ #define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */ #define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID)) #define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1) -#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8)) +#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * ((1<<0) | (1<<1) | \ + (1<<2) | (1<<3) | (1<<13))) #define PREBUILT_REVISION 1 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -53,7 +53,7 @@ static void init_shadowstack(void) { struct tx_descriptor *d = thread_descriptor; - d->shadowstack = malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); + d->shadowstack = stm_malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); if (!d->shadowstack) { stm_fatalerror("out of memory: shadowstack\n"); } @@ -69,7 +69,7 @@ assert(x == END_MARKER_ON); assert(stm_shadowstack == d->shadowstack); stm_shadowstack = NULL; - free(d->shadowstack); + stm_free(d->shadowstack, sizeof(gcptr) * LENGTH_SHADOW_STACK); } void stm_set_max_aborts(int max_aborts) diff --git a/rpython/translator/stm/src_stm/weakref.c b/rpython/translator/stm/src_stm/weakref.c --- a/rpython/translator/stm/src_stm/weakref.c +++ b/rpython/translator/stm/src_stm/weakref.c @@ -28,7 +28,7 @@ */ while (gcptrlist_size(&d->young_weakrefs) > 0) { gcptr weakref = gcptrlist_pop(&d->young_weakrefs); - if (!(weakref->h_tid & GCFLAG_NURSERY_MOVED)) + if (!(weakref->h_tid & GCFLAG_MOVED)) continue; /* the weakref itself dies */ weakref = (gcptr)weakref->h_revision; @@ -37,7 +37,7 @@ assert(pointing_to != NULL); if (stmgc_is_in_nursery(d, pointing_to)) { - if (pointing_to->h_tid & GCFLAG_NURSERY_MOVED) { + if (pointing_to->h_tid & GCFLAG_MOVED) { dprintf(("weakref ptr moved %p->%p\n", WEAKREF_PTR(weakref, size), (gcptr)pointing_to->h_revision)); @@ -69,49 +69,25 @@ static _Bool is_partially_visited(gcptr obj) { - /* Based on gcpage.c:visit(). Check the code here if we simplify - visit(). Returns True or False depending on whether we find any - version of 'obj' to be VISITED or not. + /* Based on gcpage.c:visit_public(). Check the code here if we change + visit_public(). Returns True or False depending on whether we find any + version of 'obj' to be MARKED or not. 
*/ - restart: - if (obj->h_tid & GCFLAG_VISITED) + assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, + obj->h_tid & GCFLAG_MARKED)); + if (obj->h_tid & GCFLAG_MARKED) return 1; - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); + if (!(obj->h_tid & GCFLAG_PUBLIC)) return 0; - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - } - goto restart; - } - else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - if (B->h_tid & GCFLAG_VISITED) + + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + gcptr original = (gcptr)obj->h_original; + assert(IMPLIES(original->h_tid & GCFLAG_VISITED, + original->h_tid & GCFLAG_MARKED)); + if (original->h_tid & GCFLAG_MARKED) return 1; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - obj = (gcptr)B->h_revision; - goto restart; - } } return 0; } From noreply at buildbot.pypy.org Sat Jul 27 16:54:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 16:54:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Update these constants Message-ID: <20130727145403.22B8B1C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65715:a5a607e647f7 Date: 2013-07-27 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/a5a607e647f7/ Log: Update these constants diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -44,15 +44,16 @@ GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h - GCFLAG_NURSERY_MOVED = first_gcflag << 6 + GCFLAG_MOVED = first_gcflag << 6 GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug GCFLAG_STUB = first_gcflag << 8 # debug GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 GCFLAG_HAS_ID = first_gcflag << 10 GCFLAG_IMMUTABLE = first_gcflag << 11 GCFLAG_SMALLSTUB = first_gcflag << 12 + GCFLAG_MARKED = first_gcflag << 13 - PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 8) + PREBUILT_FLAGS = first_gcflag * ((1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<13)) PREBUILT_REVISION = r_uint(1) FX_MASK = 65535 From noreply at buildbot.pypy.org Sat Jul 27 17:03:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 17:03:41 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Add a direct translation test for weakrefs Message-ID: <20130727150341.618071C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65716:98a8c851a299 Date: 2013-07-27 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/98a8c851a299/ Log: Add a direct translation test for weakrefs diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -110,7 +110,7 @@ # XXX finalizers are ignored for now #ll_assert(not needs_finalizer, 'XXX needs_finalizer') #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') - #ll_assert(not contains_weakptr, 
'XXX contains_weakptr') + ll_assert(not contains_weakptr, 'contains_weakptr: use malloc_weakref') # XXX call optimized versions, e.g. if size < GC_NURSERY_SECTION return llop.stm_allocate(llmemory.GCREF, size, typeid16) diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,5 +1,5 @@ import py -from rpython.rlib import rstm, rgc +from rpython.rlib import rstm, rgc, objectmodel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr @@ -276,3 +276,30 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'li102ee10:hi there 3e\n0\n' in data + + def test_weakref(self): + import weakref + class Foo(object): + pass + + def f(argv): + foo = Foo() + foo.n = argv + w = weakref.ref(foo) + assert w() is foo + objectmodel.keepalive_until_here(foo) + return w + f._dont_inline_ = True + + def main(argv): + w = f(argv) + assert w() is not None + assert len(w().n) == len(argv) + rgc.collect() + assert w() is None + print 'test ok' + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('a b') + assert 'test ok\n' in data From noreply at buildbot.pypy.org Sat Jul 27 17:09:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 17:09:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: A problem with can_move() Message-ID: <20130727150943.BCA661C1055@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65717:8dc08afd119f Date: 2013-07-27 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/8dc08afd119f/ Log: A problem with can_move() diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -132,7 +132,8 @@ seen by the GC, then it can get collected.""" tid = self.get_hdr_tid(obj)[0] if bool(tid & self.GCFLAG_OLD): - return False + return False # XXX wrong so far. 
We should add a flag to the + # object that means "don't ever kill this copy" return True From noreply at buildbot.pypy.org Sat Jul 27 17:11:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 27 Jul 2013 17:11:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Another XXX Message-ID: <20130727151143.A58C21C1055@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c4 Changeset: r65718:fcf0ada2f62f Date: 2013-07-27 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/fcf0ada2f62f/ Log: Another XXX diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -139,6 +139,7 @@ @classmethod def JIT_max_size_of_young_obj(cls): + # XXX there is actually a maximum, check return None @classmethod From noreply at buildbot.pypy.org Sat Jul 27 20:21:46 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 27 Jul 2013 20:21:46 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: merge fast-slowpath Message-ID: <20130727182146.31D6E1C1055@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-gen-store-back-in Changeset: r65719:90fd45e09125 Date: 2013-07-27 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/90fd45e09125/ Log: merge fast-slowpath diff too long, truncating to 2000 out of 4227 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -65,6 +65,11 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check + g['OPT'] = "" + g['CFLAGS'] = "" + g['CPPFLAGS'] = "" + g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' + g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -122,13 +127,34 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) + cc, opt, cflags, ccshared, ldshared = get_config_vars( + 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') + compiler.shared_lib_extension = get_config_var('SO') - if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() - compiler.compiler.extend(cflags) - compiler.compiler_so.extend(cflags) - compiler.linker_so.extend(cflags) + + if 'LDSHARED' in os.environ: + ldshared = os.environ['LDSHARED'] + if 'CPP' in os.environ: + cpp = os.environ['CPP'] + else: + cpp = cc + " -E" # not always + if 'LDFLAGS' in os.environ: + ldshared = ldshared + ' ' + os.environ['LDFLAGS'] + if 'CFLAGS' in os.environ: + cflags = opt + ' ' + os.environ['CFLAGS'] + ldshared = ldshared + ' ' + os.environ['CFLAGS'] + if 'CPPFLAGS' in os.environ: + cpp = cpp + ' ' + os.environ['CPPFLAGS'] + cflags = cflags + ' ' + os.environ['CPPFLAGS'] + ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] + + cc_cmd = cc + ' ' + cflags + + compiler.set_executables( + preprocessor=cpp, + compiler=cc_cmd, + compiler_so=cc_cmd + ' ' + ccshared, + linker_so=ldshared) from sysconfig_cpython import ( diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -75,6 +76,15 @@ # up the 'parent' explicitly. Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -4,6 +4,12 @@ .. contents:: +.. warning:: + + Please `read this FAQ entry`_ first! + +.. 
_`read this FAQ entry`: http://doc.pypy.org/en/latest/faq.html#do-i-have-to-rewrite-my-programs-in-rpython + RPython is a subset of Python that can be statically compiled. The PyPy interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,60 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. + +Highlights +========== + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -43,7 +43,7 @@ See cbuild_ for more info on ExternalCompilationInfo. .. _`low level types`: rtyper.html#low-level-type -.. 
_cbuild: https://bitbucket.org/pypy/pypy/src/tip/pypy/translator/tool/cbuild.py +.. _cbuild: https://bitbucket.org/pypy/pypy/src/tip/rpython/translator/tool/cbuild.py Types @@ -69,9 +69,3 @@ as a fake low-level implementation for tests performed by an llinterp. .. _`extfunc.py`: https://bitbucket.org/pypy/pypy/src/tip/pypy/rpython/extfunc.py - - -OO backends ------------ - -XXX to be written diff --git a/pypy/doc/whatsnew-2.1.rst b/pypy/doc/whatsnew-2.1.rst --- a/pypy/doc/whatsnew-2.1.rst +++ b/pypy/doc/whatsnew-2.1.rst @@ -76,3 +76,8 @@ .. branch: inline-identityhash Inline the fast path of id() and hash() + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,28 @@ .. branch: improve-str2charp Improve the performance of I/O writing up to 15% by using memcpy instead of copying char-by-char in str2charp and get_nonmovingbuffer + +.. branch: flowoperators +Simplify rpython/flowspace/ code by using more metaprogramming. Create +SpaceOperator class to gather static information about flow graph operations. + +.. branch: package-tk +Adapt package.py script to compile CFFI tk extension. Add a --without-tk switch +to optionally skip it. + +.. branch: distutils-cppldflags +Copy CPython's implementation of customize_compiler, dont call split on +environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and LDFLAGS on Unices. + +.. branch: precise-instantiate +When an RPython class is instantiated via an indirect call (that is, which +class is being instantiated isn't known precisely) allow the optimizer to have +more precise information about which functions can be called. Needed for Topaz. + +.. branch: ssl_moving_write_buffer + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. 
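As an illustrative sketch (not part of the changeset above): the add-statvfs branch notes mention the new os.statvfs/os.fstatvfs wrappers; assuming a POSIX system, the resulting statvfs_result is typically used like this to compute disk usage.

    import os

    # Sketch only: compute capacity from the standard POSIX statvfs fields
    # exposed by the statvfs_result structseq (f_frsize, f_blocks, f_bavail).
    st = os.statvfs(".")
    total_bytes = st.f_blocks * st.f_frsize
    free_bytes = st.f_bavail * st.f_frsize   # space available to unprivileged users
    print("%d of %d bytes free" % (free_bytes, total_bytes))
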
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -196,6 +196,11 @@ print >> sys.stderr, "Python", sys.version raise SystemExit + +def funroll_loops(*args): + print("Vroom vroom, I'm a racecar!") + + def set_jit_option(options, jitparam, *args): if jitparam == 'help': _print_jit_help() @@ -381,6 +386,7 @@ 'Q': (div_option, Ellipsis), '--info': (print_info, None), '--jit': (set_jit_option, Ellipsis), + '-funroll-loops': (funroll_loops, None), '--': (end_options, None), } diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -370,7 +370,7 @@ from pypy.module._pickle_support import maker # helper fns from pypy.interpreter.pycode import PyCode from pypy.interpreter.module import Module - args_w = space.unpackiterable(w_args) + args_w = space.unpackiterable(w_args, 18) w_f_back, w_builtin, w_pycode, w_valuestack, w_blockstack, w_exc_value, w_tb,\ w_globals, w_last_instr, w_finished, w_f_lineno, w_fastlocals, w_f_locals, \ w_f_trace, w_instr_lb, w_instr_ub, w_instr_prev_plus_one, w_cells = args_w diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -226,6 +226,10 @@ restore_top_frame(f1, saved) f2 = pickle.loads(pckl) + def test_frame_setstate_crash(self): + import sys + raises(ValueError, sys._getframe().__setstate__, []) + def test_pickle_traceback(self): def f(): try: diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -36,6 +36,20 @@ } +class IntOpModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'int_add': 'interp_intop.int_add', + 'int_sub': 'interp_intop.int_sub', + 'int_mul': 'interp_intop.int_mul', + 'int_floordiv': 'interp_intop.int_floordiv', + 'int_mod': 'interp_intop.int_mod', + 'int_lshift': 'interp_intop.int_lshift', + 'int_rshift': 'interp_intop.int_rshift', + 'uint_rshift': 'interp_intop.uint_rshift', + } + + class Module(MixedModule): appleveldefs = { } @@ -67,6 +81,7 @@ "builders": BuildersModule, "time": TimeModule, "thread": ThreadModule, + "intop": IntOpModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_intop.py b/pypy/module/__pypy__/interp_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_intop.py @@ -0,0 +1,39 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib.rarithmetic import r_uint, intmask + + + at unwrap_spec(n=int, m=int) +def int_add(space, n, m): + return space.wrap(llop.int_add(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_sub(space, n, m): + return space.wrap(llop.int_sub(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mul(space, n, m): + return space.wrap(llop.int_mul(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_floordiv(space, n, m): + return space.wrap(llop.int_floordiv(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_mod(space, n, m): + return space.wrap(llop.int_mod(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def int_lshift(space, n, m): + return space.wrap(llop.int_lshift(lltype.Signed, n, m)) + + at 
unwrap_spec(n=int, m=int) +def int_rshift(space, n, m): + return space.wrap(llop.int_rshift(lltype.Signed, n, m)) + + at unwrap_spec(n=int, m=int) +def uint_rshift(space, n, m): + n = r_uint(n) + x = llop.uint_rshift(lltype.Unsigned, n, m) + return space.wrap(intmask(x)) diff --git a/pypy/module/__pypy__/test/test_intop.py b/pypy/module/__pypy__/test/test_intop.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_intop.py @@ -0,0 +1,104 @@ + + +class AppTestIntOp: + spaceconfig = dict(usemodules=['__pypy__']) + + def w_intmask(self, n): + import sys + n &= (sys.maxsize*2+1) + if n > sys.maxsize: + n -= 2*(sys.maxsize+1) + return int(n) + + def test_intmask(self): + import sys + assert self.intmask(sys.maxsize) == sys.maxsize + assert self.intmask(sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(-sys.maxsize-2) == sys.maxsize + N = 2 ** 128 + assert self.intmask(N+sys.maxsize) == sys.maxsize + assert self.intmask(N+sys.maxsize+1) == -sys.maxsize-1 + assert self.intmask(N-sys.maxsize-2) == sys.maxsize + + def test_int_add(self): + import sys + from __pypy__ import intop + assert intop.int_add(40, 2) == 42 + assert intop.int_add(sys.maxsize, 1) == -sys.maxsize-1 + assert intop.int_add(-2, -sys.maxsize) == sys.maxsize + + def test_int_sub(self): + import sys + from __pypy__ import intop + assert intop.int_sub(40, -2) == 42 + assert intop.int_sub(sys.maxsize, -1) == -sys.maxsize-1 + assert intop.int_sub(-2, sys.maxsize) == sys.maxsize + + def test_int_mul(self): + import sys + from __pypy__ import intop + assert intop.int_mul(40, -2) == -80 + assert intop.int_mul(-sys.maxsize, -sys.maxsize) == ( + self.intmask(sys.maxsize ** 2)) + + def test_int_floordiv(self): + import sys + from __pypy__ import intop + assert intop.int_floordiv(41, 3) == 13 + assert intop.int_floordiv(41, -3) == -13 + assert intop.int_floordiv(-41, 3) == -13 + assert intop.int_floordiv(-41, -3) == 13 + assert intop.int_floordiv(-sys.maxsize, -1) == sys.maxsize + assert intop.int_floordiv(sys.maxsize, -1) == -sys.maxsize + + def test_int_mod(self): + import sys + from __pypy__ import intop + assert intop.int_mod(41, 3) == 2 + assert intop.int_mod(41, -3) == 2 + assert intop.int_mod(-41, 3) == -2 + assert intop.int_mod(-41, -3) == -2 + assert intop.int_mod(-sys.maxsize, -1) == 0 + assert intop.int_mod(sys.maxsize, -1) == 0 + + def test_int_lshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + assert intop.int_lshift(42, 3) == 42 << 3 + assert intop.int_lshift(0, 3333) == 0 + assert intop.int_lshift(1, bits-2) == 1 << (bits-2) + assert intop.int_lshift(1, bits-1) == -sys.maxsize-1 == (-1) << (bits-1) + assert intop.int_lshift(-1, bits-2) == (-1) << (bits-2) + assert intop.int_lshift(-1, bits-1) == -sys.maxsize-1 + assert intop.int_lshift(sys.maxsize // 3, 2) == ( + self.intmask((sys.maxsize // 3) << 2)) + assert intop.int_lshift(-sys.maxsize // 3, 2) == ( + self.intmask((-sys.maxsize // 3) << 2)) + + def test_int_rshift(self): + from __pypy__ import intop + assert intop.int_rshift(42, 3) == 42 >> 3 + assert intop.int_rshift(-42, 3) == (-42) >> 3 + assert intop.int_rshift(0, 3333) == 0 + assert intop.int_rshift(-1, 0) == -1 + assert intop.int_rshift(-1, 1) == -1 + + def test_uint_rshift(self): + import sys + from __pypy__ import intop + if sys.maxsize == 2**31-1: + bits = 32 + else: + bits = 64 + N = 1 << bits + assert intop.uint_rshift(42, 3) == 42 >> 3 + assert intop.uint_rshift(-42, 3) == (N-42) >> 3 + assert intop.uint_rshift(0, 3333) 
== 0 + assert intop.uint_rshift(-1, 0) == -1 + assert intop.uint_rshift(-1, 1) == sys.maxsize + assert intop.uint_rshift(-1, bits-2) == 3 + assert intop.uint_rshift(-1, bits-1) == 1 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1219,6 +1219,54 @@ for i, f in enumerate(flist): assert f(-142) == -142 + i +def test_callback_receiving_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(s): + return s.a + 10 * s.b + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, -4]) + n = f(p[0]) + assert n == -42 + +def test_callback_returning_tiny_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BSChar, -1)]) + def cb(n): + return newp(BStructPtr, [-n, -3*n])[0] + BFunc = new_function_type((BInt,), BStruct) + f = callback(BFunc, cb) + s = f(10) + assert typeof(s) is BStruct + assert repr(s) == "" + assert s.a == -10 + assert s.b == -30 + +def test_callback_receiving_struct(): + BSChar = new_primitive_type("signed char") + BInt = new_primitive_type("int") + BDouble = new_primitive_type("double") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BSChar, -1), + ('b', BDouble, -1)]) + def cb(s): + return s.a + int(s.b) + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, [-2, 44.444]) + n = f(p[0]) + assert n == 42 + def test_callback_returning_struct(): BSChar = new_primitive_type("signed char") BInt = new_primitive_type("int") @@ -1238,6 +1286,30 @@ assert s.a == -10 assert s.b == 1E-42 +def test_callback_receiving_big_struct(): + BInt = new_primitive_type("int") + BStruct = new_struct_type("struct foo") + BStructPtr = new_pointer_type(BStruct) + complete_struct_or_union(BStruct, [('a', BInt, -1), + ('b', BInt, -1), + ('c', BInt, -1), + ('d', BInt, -1), + ('e', BInt, -1), + ('f', BInt, -1), + ('g', BInt, -1), + ('h', BInt, -1), + ('i', BInt, -1), + ('j', BInt, -1)]) + def cb(s): + for i, name in enumerate("abcdefghij"): + assert getattr(s, name) == 13 - i + return 42 + BFunc = new_function_type((BStruct,), BInt) + f = callback(BFunc, cb) + p = newp(BStructPtr, list(range(13, 3, -1))) + n = f(p[0]) + assert n == 42 + def test_callback_returning_big_struct(): BInt = new_primitive_type("int") BStruct = new_struct_type("struct foo") @@ -2760,6 +2832,20 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) +def test_new_handle_cycle(): + import _weakref + BVoidP = new_pointer_type(new_void_type()) + class A(object): + pass + o = A() + o.cycle = newp_handle(BVoidP, o) + wr = _weakref.ref(o) + del o + for i in range(3): + if wr() is not None: + import gc; gc.collect() + assert wr() is None + def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py 
+++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1107,6 +1107,14 @@ S2E = _rawffi.Structure([('bah', (EMPTY, 1))]) S2E.get_ffi_type() # does not hang + def test_overflow_error(self): + import _rawffi + A = _rawffi.Array('d') + arg1 = A(1) + raises(OverflowError, "arg1[0] = 10**900") + arg1.free() + + class AppTestAutoFree: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -473,7 +473,7 @@ option_ptr = rffi.cast(rffi.INTP, value_ptr) option_ptr[0] = space.int_w(w_option) elif cmd == _c.SIO_KEEPALIVE_VALS: - w_onoff, w_time, w_interval = space.unpackiterable(w_option) + w_onoff, w_time, w_interval = space.unpackiterable(w_option, 3) option_ptr = rffi.cast(lltype.Ptr(_c.tcp_keepalive), value_ptr) option_ptr.c_onoff = space.uint_w(w_onoff) option_ptr.c_keepalivetime = space.uint_w(w_time) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -722,7 +722,10 @@ libssl_SSL_CTX_set_verify(ss.ctx, verification_mode, None) ss.ssl = libssl_SSL_new(ss.ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL - libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY) + # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address + # of a str object may be changed by the garbage collector. 
+ libssl_SSL_set_mode(ss.ssl, + SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -74,7 +74,7 @@ return space.newtuple([w_fileobj, w_filename, w_import_info]) def load_module(space, w_name, w_file, w_filename, w_info): - w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info) + w_suffix, w_filemode, w_modtype = space.unpackiterable(w_info, 3) filename = space.str0_w(w_filename) filemode = space.str_w(w_filemode) diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -679,6 +679,10 @@ assert module.__name__ == 'a' assert module.__file__ == 'invalid_path_name' + def test_crash_load_module(self): + import imp + raises(ValueError, imp.load_module, "", "", "", [1, 2, 3, 4]) + class TestAbi: def test_abi_tag(self): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -280,7 +280,7 @@ backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + return rffi.cast(lltype.Signed, self.storage) + self.start def get_storage(self): return self.storage diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -318,7 +318,7 @@ if not base.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape,]) else: - w_fldname, w_flddesc = space.fixedview(w_elem) + w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) if fldname in fields: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2212,6 +2212,11 @@ a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) + b = array(range(9), dtype=int) + c = b[3:5] + b_data = b.__array_interface__['data'][0] + c_data = c.__array_interface__['data'][0] + assert b_data + 3 * b.dtype.itemsize == c_data def test_array_indexing_one_elem(self): from numpypy import array, arange diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -17,12 +17,13 @@ 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', 'ttyname', 'uname', 'wait', 'wait3', 'wait4' - ] +] # the Win32 urandom implementation isn't going to translate on JVM or CLI so # we have to remove it lltype_only_defs.append('urandom') + class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -32,20 +33,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' 
: 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -53,44 +55,46 @@ appleveldefs['wait3'] = 'app_posix.wait3' if hasattr(os, 'wait4'): appleveldefs['wait4'] = 'app_posix.wait4' - + interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', + '_statfields': 'interp_posix.getstatfields(space)', + 'kill': 'interp_posix.kill', + 'abort': 'interp_posix.abort', + 'urandom': 'interp_posix.urandom', } if hasattr(os, 'chown'): @@ -167,9 +171,9 @@ interpleveldefs['getlogin'] = 'interp_posix.getlogin' for name in ['setsid', 
'getuid', 'geteuid', 'getgid', 'getegid', 'setuid', - 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', - 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', - 'setregid', 'getsid', 'setsid']: + 'seteuid', 'setgid', 'setegid', 'getgroups', 'getpgrp', + 'setpgrp', 'getppid', 'getpgid', 'setpgid', 'setreuid', + 'setregid', 'getsid', 'setsid', 'fstatvfs', 'statvfs']: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.%s' % (name,) # not visible via os, inconsistency in nt: @@ -177,7 +181,7 @@ interpleveldefs['_getfullpathname'] = 'interp_posix._getfullpathname' if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - + for name in RegisterOs.w_star: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name @@ -186,7 +190,7 @@ # if it's an ootype translation, remove all the defs that are lltype # only backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm': + if backend == 'cli' or backend == 'jvm' : for name in lltype_only_defs: self.interpleveldefs.pop(name, None) MixedModule.__init__(self, space, w_name) @@ -194,7 +198,7 @@ def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) - + for constant in dir(os): value = getattr(os, constant) if constant.isupper() and type(value) is int: diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -65,6 +65,23 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + +class statvfs_result: + __metaclass__ = structseqtype + + name = osname + ".statvfs_result" + + f_bsize = structseqfield(0) + f_frsize = structseqfield(1) + f_blocks = structseqfield(2) + f_bfree = structseqfield(3) + f_bavail = structseqfield(4) + f_files = structseqfield(5) + f_ffree = structseqfield(6) + f_favail = structseqfield(7) + f_flag = structseqfield(8) + f_namemax = structseqfield(9) + if osname == 'posix': # POSIX: we want to check the file descriptor when fdopen() is called, # not later when we read or write data. 
So we call fstat(), letting diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,15 +1,17 @@ -from pypy.interpreter.gateway import unwrap_spec +import os +import sys + from rpython.rlib import rposix, objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.module import ll_os_stat +from rpython.rtyper.module.ll_os import RegisterOs + +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 -from rpython.rtyper.module.ll_os import RegisterOs -from rpython.rtyper.module import ll_os_stat from pypy.module.sys.interp_encoding import getfilesystemencoding -import os -import sys _WIN32 = sys.platform == 'win32' if _WIN32: @@ -213,6 +215,7 @@ STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) PORTABLE_STAT_FIELDS = unrolling_iterable( enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): if space.config.translation.type_system == 'ootype': @@ -253,6 +256,16 @@ space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) + +def build_statvfs_result(space, st): + vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + for i, (name, _) in STATVFS_FIELDS: + vals_w[i] = space.wrap(getattr(st, name)) + w_tuple = space.newtuple(vals_w) + w_statvfs_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('statvfs_result')) + return space.call_function(w_statvfs_result, w_tuple) + + @unwrap_spec(fd=c_int) def fstat(space, fd): """Perform a stat system call on the file referenced to by an open @@ -314,6 +327,26 @@ else: state.stat_float_times = space.bool_w(w_value) + + at unwrap_spec(fd=c_int) +def fstatvfs(space, fd): + try: + st = os.fstatvfs(fd) + except OSError as e: + raise wrap_oserror(space, e) + else: + return build_statvfs_result(space, st) + + +def statvfs(space, w_path): + try: + st = dispatch_filename(rposix.statvfs)(space, w_path) + except OSError as e: + raise wrap_oserror2(space, e, w_path) + else: + return build_statvfs_result(space, st) + + @unwrap_spec(fd=c_int) def dup(space, fd): """Create a copy of the file descriptor. 
Return the new file diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -169,7 +169,8 @@ assert stat.S_ISDIR(st.st_mode) def test_stat_exception(self): - import sys, errno + import sys + import errno for fn in [self.posix.stat, self.posix.lstat]: try: fn("nonexistentdir/nonexistentfile") @@ -183,6 +184,15 @@ assert isinstance(e, WindowsError) assert e.winerror == 3 + def test_statvfs(self): + st = self.posix.statvfs(".") + assert isinstance(st, self.posix.statvfs_result) + for field in [ + 'f_bsize', 'f_frsize', 'f_blocks', 'f_bfree', 'f_bavail', + 'f_files', 'f_ffree', 'f_favail', 'f_flag', 'f_namemax', + ]: + assert hasattr(st, field) + def test_pickle(self): import pickle, os st = self.posix.stat(os.curdir) diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -125,6 +125,9 @@ self.llbox = llbox def descr_getint(self, space): + if not jit_hooks.box_isint(self.llbox): + raise OperationError(space.w_NotImplementedError, + space.wrap("Box has no int value")) return space.wrap(jit_hooks.box_getint(self.llbox)) @unwrap_spec(no=int) @@ -182,7 +185,12 @@ @unwrap_spec(no=int) def descr_getarg(self, space, no): - return WrappedBox(jit_hooks.resop_getarg(self.op, no)) + try: + box = jit_hooks.resop_getarg(self.op, no) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("Index out of range")) + return WrappedBox(box) @unwrap_spec(no=int, w_box=WrappedBox) def descr_setarg(self, space, no, w_box): @@ -232,7 +240,8 @@ getarg = interp2app(WrappedOp.descr_getarg), setarg = interp2app(WrappedOp.descr_setarg), result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult) + WrappedOp.descr_setresult), + offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.acceptable_as_base_class = False @@ -342,6 +351,10 @@ doc="bridge number (if a bridge)"), type = interp_attrproperty('type', cls=W_JitLoopInfo, doc="Loop type"), + asmaddr = interp_attrproperty('asmaddr', cls=W_JitLoopInfo, + doc="Address of machine code"), + asmlen = interp_attrproperty('asmlen', cls=W_JitLoopInfo, + doc="Length of machine code"), __repr__ = interp2app(W_JitLoopInfo.descr_repr), ) W_JitLoopInfo.acceptable_as_base_class = False diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -71,7 +71,7 @@ greenkey) di_loop_optimize = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'loop', greenkey) - di_loop.asminfo = AsmInfo(offset, 0, 0) + di_loop.asminfo = AsmInfo(offset, 0x42, 12) di_bridge = JitDebugInfo(MockJitDriverSD, logger, JitCellToken(), oplist, 'bridge', fail_descr=BasicFailDescr()) di_bridge.asminfo = AsmInfo(offset, 0, 0) @@ -123,6 +123,8 @@ assert info.greenkey[2] == False assert info.loop_no == 0 assert info.type == 'loop' + assert info.asmaddr == 0x42 + assert info.asmlen == 12 raises(TypeError, 'info.bridge_no') assert len(info.operations) == 4 int_add = info.operations[0] @@ -132,8 +134,10 @@ assert dmp.greenkey == (self.f.func_code, 0, False) assert dmp.call_depth == 0 assert dmp.call_id == 0 + assert dmp.offset == -1 assert int_add.name == 'int_add' assert int_add.num == self.int_add_num + assert int_add.offset == 0 self.on_compile_bridge() expected = ('>' % 
repr(self.f.func_code)) @@ -160,6 +164,20 @@ assert 'jit hook' in s.getvalue() assert 'ZeroDivisionError' in s.getvalue() + def test_on_compile_crashes(self): + import pypyjit + loops = [] + def hook(loop): + loops.append(loop) + pypyjit.set_compile_hook(hook) + self.on_compile() + loop = loops[0] + op = loop.operations[2] + # Should not crash the interpreter + raises(IndexError, op.getarg, 2) + assert op.name == 'guard_nonnull' + raises(NotImplementedError, op.getarg(0).getint) + def test_non_reentrant(self): import pypyjit l = [] diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -43,6 +43,7 @@ assert isinstance(res, str) rctime.ctime(rctime.time()) raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 10**900) def test_gmtime(self): import time as rctime diff --git a/pypy/module/test_lib_pypy/support.py b/pypy/module/test_lib_pypy/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/support.py @@ -0,0 +1,33 @@ +import py + +from pypy.conftest import option +from pypy.interpreter.error import OperationError + +def import_lib_pypy(space, name, skipmsg=None): + """Import a top level module ensuring it's sourced from the lib_pypy + package. + + Raises a pytest Skip on ImportError if a skip message was specified. + """ + if option.runappdirect: + try: + mod = __import__('lib_pypy.' + name) + except ImportError as e: + if skipmsg is not None: + py.test.skip('%s (%s))' % (skipmsg, str(e))) + raise + return getattr(mod, name) + + try: + # app-level import should find it from the right place (we + # assert so afterwards) as long as a builtin module doesn't + # overshadow it + failed = ("%s didn't import from lib_pypy. Is a usemodules directive " + "overshadowing it?" 
% name) + importline = ("(): import %s; assert 'lib_pypy' in %s.__file__, %r; " + "return %s" % (name, name, failed, name)) + return space.appexec([], importline) + except OperationError as e: + if skipmsg is None or not e.match(space, space.w_ImportError): + raise + py.test.skip('%s (%s))' % (skipmsg, str(e))) diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py --- a/pypy/module/test_lib_pypy/test_collections.py +++ b/pypy/module/test_lib_pypy/test_collections.py @@ -2,44 +2,51 @@ Extra tests for the pure Python PyPy _collections module (not used in normal PyPy's) """ +from pypy.module.test_lib_pypy.support import import_lib_pypy -from __future__ import absolute_import -from lib_pypy import _collections as collections -import py -class TestDeque: - def setup_method(self, method): - self.n = 10 - self.d = collections.deque(range(self.n)) +class AppTestDeque: + + def setup_class(cls): + space = cls.space + cls.w_collections = import_lib_pypy(space, '_collections') + cls.w_n = space.wrap(10) + + def w_get_deque(self): + return self.collections.deque(range(self.n)) def test_deque(self): - assert len(self.d) == self.n + d = self.get_deque() + assert len(d) == self.n for i in range(self.n): - assert i == self.d[i] + assert i == d[i] for i in range(self.n-1, -1, -1): - assert self.d.pop() == i - assert len(self.d) == 0 + assert d.pop() == i + assert len(d) == 0 def test_deque_iter(self): - it = iter(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = iter(d) + raises(TypeError, len, it) assert it.next() == 0 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def test_deque_reversed(self): - it = reversed(self.d) - py.test.raises(TypeError, len, it) + d = self.get_deque() + it = reversed(d) + raises(TypeError, len, it) assert it.next() == self.n-1 assert it.next() == self.n-2 - self.d.pop() - py.test.raises(RuntimeError, it.next) + d.pop() + raises(RuntimeError, it.next) def test_deque_remove(self): - d = self.d - py.test.raises(ValueError, d.remove, "foobar") + d = self.get_deque() + raises(ValueError, d.remove, "foobar") def test_mutate_during_remove(self): + collections = self.collections # Handle evil mutator class MutateCmp: def __init__(self, deque, result): @@ -52,24 +59,33 @@ for match in (True, False): d = collections.deque(['ab']) d.extend([MutateCmp(d, match), 'c']) - py.test.raises(IndexError, d.remove, 'c') + raises(IndexError, d.remove, 'c') assert len(d) == 0 -class TestDequeExtra: +class AppTestDequeExtra: + + spaceconfig = dict(usemodules=('binascii', 'struct',)) + + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_remove_empty(self): + collections = self.collections d = collections.deque([]) - py.test.raises(ValueError, d.remove, 1) + raises(ValueError, d.remove, 1) def test_remove_mutating(self): + collections = self.collections class MutatingCmp(object): def __eq__(self, other): d.clear() return True d = collections.deque([MutatingCmp()]) - py.test.raises(IndexError, d.remove, 1) + raises(IndexError, d.remove, 1) def test_remove_failing(self): + collections = self.collections class FailingCmp(object): def __eq__(self, other): assert False @@ -77,10 +93,11 @@ f = FailingCmp() d = collections.deque([1, 2, 3, f, 4, 5]) d.remove(3) - py.test.raises(AssertionError, d.remove, 4) + raises(AssertionError, d.remove, 4) assert d == collections.deque([1, 2, f, 4, 5]) def test_maxlen(self): + collections = 
self.collections d = collections.deque([], 3) d.append(1); d.append(2); d.append(3); d.append(4) assert list(d) == [2, 3, 4] @@ -95,11 +112,13 @@ assert repr(d3) == "deque([2, 3, 4], maxlen=3)" def test_count(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) assert d.count(2) == 3 assert d.count(4) == 0 def test_reverse(self): + collections = self.collections d = collections.deque([1, 2, 2, 3, 2]) d.reverse() assert list(d) == [2, 3, 2, 2, 1] @@ -109,6 +128,7 @@ assert list(d) == range(99, -1, -1) def test_subclass_with_kwargs(self): + collections = self.collections class SubclassWithKwargs(collections.deque): def __init__(self, newarg=1): collections.deque.__init__(self) @@ -116,11 +136,13 @@ # SF bug #1486663 -- this used to erroneously raise a TypeError SubclassWithKwargs(newarg=1) -def foobar(): - return list +class AppTestDefaultDict: -class TestDefaultDict: + def setup_class(cls): + cls.w_collections = import_lib_pypy(cls.space, '_collections') + def test_basic(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory is None d1.default_factory = list @@ -148,20 +170,23 @@ assert 12 not in d2.keys() d2.default_factory = None assert d2.default_factory == None - py.test.raises(KeyError, d2.__getitem__, 15) - py.test.raises(TypeError, collections.defaultdict, 1) + raises(KeyError, d2.__getitem__, 15) + raises(TypeError, collections.defaultdict, 1) def test_constructor(self): + collections = self.collections assert collections.defaultdict(None) == {} assert collections.defaultdict(None, {1: 2}) == {1: 2} def test_missing(self): + collections = self.collections d1 = collections.defaultdict() - py.test.raises(KeyError, d1.__missing__, 42) + raises(KeyError, d1.__missing__, 42) d1.default_factory = list assert d1.__missing__(42) == [] def test_repr(self): + collections = self.collections d1 = collections.defaultdict() assert d1.default_factory == None assert repr(d1) == "defaultdict(None, {})" @@ -181,6 +206,7 @@ assert repr(d4) == "defaultdict(%s, {14: defaultdict(None, {})})" % repr(int) def test_recursive_repr(self): + collections = self.collections # Issue2045: stack overflow when default_factory is a bound method class sub(collections.defaultdict): def __init__(self): @@ -192,6 +218,7 @@ "defaultdict( Author: Maciej Fijalkowski Branch: Changeset: r65720:74ec2abeb333 Date: 2013-07-27 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/74ec2abeb333/ Log: some minor improvement to minimark. not sure if worth it. diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -74,7 +74,8 @@ is_rpython_class, has_custom_trace, get_custom_trace, - fast_path_tracing): + fast_path_tracing, + has_gcptr): self.getfinalizer = getfinalizer self.getlightfinalizer = getlightfinalizer self.is_varsize = is_varsize @@ -92,6 +93,7 @@ self.has_custom_trace = has_custom_trace self.get_custom_trace = get_custom_trace self.fast_path_tracing = fast_path_tracing + self.has_gcptr = has_gcptr def get_member_index(self, type_id): return self.member_index(type_id) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1511,6 +1511,7 @@ # replace the old object's content with the target address. # A bit of no-ops to convince llarena that we are changing # the layout, in non-translated versions. 
+ typeid = self.get_type_id(obj) obj = llarena.getfakearenaaddress(obj) llarena.arena_reset(obj - size_gc_header, totalsize, 0) llarena.arena_reserve(obj - size_gc_header, @@ -1526,7 +1527,9 @@ # because it can contain further pointers to other young objects. # We will fix such references to point to the copy of the young # objects when we walk 'old_objects_pointing_to_young'. - self.old_objects_pointing_to_young.append(newobj) + if self.has_gcptr(typeid): + # we only have to do it if we have any gcptrs + self.old_objects_pointing_to_young.append(newobj) _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): @@ -1815,6 +1818,8 @@ # # It's the first time. We set the flag. hdr.tid |= GCFLAG_VISITED + if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): + return # # Trace the content of the object and put all objects it references # into the 'objects_to_trace' list. diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -75,6 +75,10 @@ infobits = self.get(typeid).infobits return (infobits & T_HAS_GCPTR_IN_VARSIZE) != 0 + def q_has_gcptr(self, typeid): + infobits = self.get(typeid).infobits + return (infobits & T_HAS_GCPTR) != 0 + def q_is_gcarrayofgcptr(self, typeid): infobits = self.get(typeid).infobits return (infobits & T_IS_GCARRAY_OF_GCPTR) != 0 @@ -162,7 +166,8 @@ self.q_is_rpython_class, self.q_has_custom_trace, self.q_get_custom_trace, - self.q_fast_path_tracing) + self.q_fast_path_tracing, + self.q_has_gcptr) # the lowest 16bits are used to store group member index @@ -175,7 +180,8 @@ T_HAS_FINALIZER = 0x200000 T_HAS_CUSTOM_TRACE = 0x400000 T_HAS_LIGHTWEIGHT_FINALIZER = 0x800000 -T_KEY_MASK = intmask(0xFF000000) +T_HAS_GCPTR = 0x1000000 +T_KEY_MASK = intmask(0xFE000000) # bug detection only T_KEY_VALUE = intmask(0x5A000000) # bug detection only def _check_valid_type_info(p): @@ -250,6 +256,8 @@ infobits |= T_IS_WEAKREF if is_subclass_of_object(TYPE): infobits |= T_IS_RPYTHON_INSTANCE + if infobits | T_HAS_GCPTR_IN_VARSIZE or offsets: + infobits |= T_HAS_GCPTR info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ From noreply at buildbot.pypy.org Sat Jul 27 21:03:23 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 27 Jul 2013 21:03:23 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Test and fix for u''.join([]). Message-ID: <20130727190323.7724E1C0113@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65721:350779843103 Date: 2013-07-27 19:58 +0200 http://bitbucket.org/pypy/pypy/changeset/350779843103/ Log: Test and fix for u''.join([]). diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -905,3 +905,14 @@ res = 'one!two!three!'.replace(u'!', u'@', 1) assert res == u'one at two!three!' 
assert type(res) == unicode + + def test_join_subclass(self): + class UnicodeSubclass(unicode): + pass + class StrSubclass(str): + pass + + s1 = UnicodeSubclass(u'a') + assert u''.join([s1]) is not s1 + s2 = StrSubclass(u'a') + assert u''.join([s2]) is not s2 diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -196,8 +196,8 @@ return space.is_w(space.type(w_obj), space.w_unicode) def _join_check_item(self, space, w_obj): - if (space.is_w(space.type(w_obj), space.w_str) or - space.is_w(space.type(w_obj), space.w_unicode)): + if (space.isinstance_w(w_obj, space.w_str) or + space.isinstance_w(w_obj, space.w_unicode)): return 0 return 1 From noreply at buildbot.pypy.org Sat Jul 27 21:36:40 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 27 Jul 2013 21:36:40 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20130727193640.026A51C0113@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65722:807a599758e3 Date: 2013-07-27 22:35 +0300 http://bitbucket.org/pypy/pypy/changeset/807a599758e3/ Log: typo diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,5 +44,5 @@ .. branch: statvfs_tests Added some addition tests for statvfs. -.. branch: ndarray-subtypes +.. branch: ndarray-subtype Allow subclassing ndarray, i.e. matrix From noreply at buildbot.pypy.org Sun Jul 28 00:22:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 00:22:33 +0200 (CEST) Subject: [pypy-commit] cffi default: Document this Message-ID: <20130727222233.549B21C0113@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1296:9a7fc13aa01f Date: 2013-07-28 00:22 +0200 http://bitbucket.org/cffi/cffi/changeset/9a7fc13aa01f/ Log: Document this diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1409,8 +1409,9 @@ cdata object that is a "view" of all items from ``start`` to ``stop``. It is a cdata of type "array" (so e.g. passing it as an argument to a C function would just convert it to a pointer to the ``start`` item). - It makes cdata's of type "array" behave more like a Python list, but - as with indexing, negative bounds mean really negative indices, like in + This makes cdata's of type "array" behave more like a Python list, but + ``start`` and ``stop`` are not optional and a ``step`` is not supported. + As with indexing, negative bounds mean really negative indices, like in C. As for slice assignment, it accepts any iterable, including a list of items or another array-like cdata object, but the length must match. (Note that this behavior differs from initialization: e.g. 
if you pass From noreply at buildbot.pypy.org Sun Jul 28 00:30:42 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 28 Jul 2013 00:30:42 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: fix test_whatsnew Message-ID: <20130727223042.0DBBA1C0113@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65723:d673441b1ba9 Date: 2013-07-27 12:42 -0700 http://bitbucket.org/pypy/pypy/changeset/d673441b1ba9/ Log: fix test_whatsnew diff --git a/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst b/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst --- a/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst +++ b/pypy/doc/whatsnew-pypy3-2.1.0-beta1.rst @@ -1,3 +1,6 @@ ========================= What's new in PyPy3 2.1.0 ========================= + +.. this is a revision shortly after pypy3-release-2.1.x +.. startrev: 1fc106b34e94 From noreply at buildbot.pypy.org Sun Jul 28 00:30:43 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 28 Jul 2013 00:30:43 +0200 (CEST) Subject: [pypy-commit] pypy default: more %N for the sake of py3k Message-ID: <20130727223043.6FB491C0113@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65724:2d035ae827dd Date: 2013-07-27 15:29 -0700 http://bitbucket.org/pypy/pypy/changeset/2d035ae827dd/ Log: more %N for the sake of py3k diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -414,8 +414,8 @@ closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): raise operationerrfmt(space.w_ValueError, - "%s() requires a code object with %d free vars, not %d", - self.name, closure_len, len(code.co_freevars)) + "%N() requires a code object with %d free vars, not %d", + self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -482,7 +482,6 @@ space.abstract_isinstance_w(w_firstarg, self.w_class)): pass # ok else: - myname = self.getname(space, "") clsdescr = self.w_class.getname(space, "") if clsdescr: clsdescr += " instance" @@ -497,10 +496,10 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %s() must be called with %s " + msg = ("unbound method %N() must be called with %s " "as first argument (got %s instead)") raise operationerrfmt(space.w_TypeError, msg, - myname, clsdescr, instdescr) + self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -553,9 +553,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%s' for '%s' objects doesn't apply to '%T' object" + m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" raise operationerrfmt(space.w_TypeError, m, - self.name, self.w_cls.name, w_obj) + self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -98,8 +98,8 @@ space = self.space w_type = self.unerase(w_dict.dstorage) if not w_type.is_heaptype(): - msg = "can't clear dictionary of type '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) + msg = "can't clear dictionary of type '%N'" + 
raise operationerrfmt(space.w_TypeError, msg, w_type) w_type.dict_w.clear() w_type.mutated(None) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -265,8 +265,8 @@ def setdictvalue(w_self, space, name, w_value): if not w_self.is_heaptype(): - msg = "can't set attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_self.name) + msg = "can't set attributes on type object '%N'" + raise operationerrfmt(space.w_TypeError, msg, w_self) if name == "__del__" and name not in w_self.dict_w: msg = ("a __del__ method added to an existing type will not be " "called") @@ -289,8 +289,8 @@ if w_self.lazyloaders: w_self._cleanup_() # force un-lazification if not w_self.is_heaptype(): - msg = "can't delete attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_self.name) + msg = "can't delete attributes on type object '%N'" + raise operationerrfmt(space.w_TypeError, msg, w_self) try: del w_self.dict_w[key] except KeyError: @@ -417,12 +417,12 @@ w_subtype) if not w_subtype.issubtype(w_self): raise operationerrfmt(space.w_TypeError, - "%s.__new__(%s): %s is not a subtype of %s", - w_self.name, w_subtype.name, w_subtype.name, w_self.name) + "%N.__new__(%N): %N is not a subtype of %N", + w_self, w_subtype, w_subtype, w_self) if w_self.instancetypedef is not w_subtype.instancetypedef: raise operationerrfmt(space.w_TypeError, - "%s.__new__(%s) is not safe, use %s.__new__()", - w_self.name, w_subtype.name, w_subtype.name) + "%N.__new__(%N) is not safe, use %N.__new__()", + w_self, w_subtype, w_subtype) return w_subtype def _cleanup_(w_self): @@ -630,7 +630,7 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, - "can't set %s.__name__", w_type.name) + "can't set %N.__name__", w_type) w_type.name = space.str_w(w_value) def descr_get__mro__(space, w_type): @@ -662,16 +662,16 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, - "can't set %s.__bases__", w_type.name) + "can't set %N.__bases__", w_type) if not space.isinstance_w(w_value, space.w_tuple): raise operationerrfmt(space.w_TypeError, - "can only assign tuple to %s.__bases__, not %T", - w_type.name, w_value) + "can only assign tuple to %N.__bases__, not %T", + w_type, w_value) newbases_w = space.fixedview(w_value) if len(newbases_w) == 0: raise operationerrfmt(space.w_TypeError, - "can only assign non-empty tuple to %s.__bases__, not ()", - w_type.name) + "can only assign non-empty tuple to %N.__bases__, not ()", + w_type) for w_newbase in newbases_w: if isinstance(w_newbase, W_TypeObject): @@ -884,9 +884,9 @@ "only classic bases")) if not w_bestbase.instancetypedef.acceptable_as_base_class: raise operationerrfmt(space.w_TypeError, - "type '%s' is not an " + "type '%N' is not an " "acceptable base class", - w_bestbase.instancetypedef.name) + w_bestbase) # check that all other bases' layouts are superclasses of the bestbase w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase @@ -1136,8 +1136,8 @@ if w_descr is not None: return space.get(w_descr, w_type) raise operationerrfmt(space.w_AttributeError, - "type object '%s' has no attribute '%s'", - w_type.name, name) + "type object '%N' has no attribute %R", + w_type, w_name) def eq__Type_Type(space, w_self, w_other): return space.is_(w_self, w_other) From noreply at buildbot.pypy.org Sun Jul 28 00:46:15 2013 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Sun, 28 Jul 2013 00:46:15 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130727224615.B49BF1C0113@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65725:bef06e42affa Date: 2013-07-27 15:42 -0700 http://bitbucket.org/pypy/pypy/changeset/bef06e42affa/ Log: merge default diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. 
+ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -5,6 +5,10 @@ We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + Highlights ========== diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,6 @@ .. branch: statvfs_tests Added some addition tests for statvfs. + +.. branch: ndarray-subtype +Allow subclassing ndarray, i.e. 
matrix diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -434,8 +434,8 @@ closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): raise operationerrfmt(space.w_ValueError, - "%s() requires a code object with %d free vars, not %d", - self.name, closure_len, len(code.co_freevars)) + "%N() requires a code object with %d free vars, not %d", + self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -565,9 +565,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%s' for '%s' objects doesn't apply to '%T' object" + m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" raise operationerrfmt(space.w_TypeError, m, - self.name, self.w_cls.name, w_obj) + self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self 
if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,15 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) + else: + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + assert isinstance(w_ret, W_NDimArray) + space.call_method(w_ret, '__array_finalize__', w_instance) + return w_ret class ArrayArgumentException(Exception): pass @@ -20,10 +29,11 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +42,12 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,15 +58,20 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if w_subtype: + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, '__array_finalize__', w_subtype) + return w_ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, 
shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,14 +186,23 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + return BoolObject(True) + def type(self, w_obj): - return w_obj.tp + if self.is_none(w_obj): + return self.w_None + try: + return w_obj.tp + except AttributeError: + if isinstance(w_obj, W_NDimArray): + return W_NDimArray + return self.w_None def gettypefor(self, w_obj): return None @@ -199,6 +210,11 @@ def call_function(self, tp, w_dtype): return w_dtype + def call_method(self, w_obj, s, *args): + # XXX even the hacks have hacks + return None + #return getattr(w_obj, 'descr_' + s)(self, *args) + @specialize.arg(1) def interp_w(self, tp, what): assert isinstance(what, tp) @@ -329,6 +345,8 @@ self.name = name.strip(" ") def execute(self, interp): + if self.name == 'None': + return None return interp.variables[self.name] def __repr__(self): @@ -451,6 +469,32 @@ def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) +class ArrayClass(Node): + def __init__(self): + self.v = W_NDimArray + + def execute(self, interp): + return self.v + + def __repr__(self): + return '' + +class DtypeClass(Node): + def __init__(self, dt): + self.v = dt + + def execute(self, interp): + if self.v == 'int': + dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'float': + dtype = get_dtype_cache(interp.space).w_float64dtype + else: + raise BadToken('unknown v to dtype "%s"' % self.v) + return dtype + + def __repr__(self): + return '' % self.v + class Execute(Node): def __init__(self, expr): self.expr = expr @@ -533,6 +577,14 @@ w_res = where(interp.space, arr, arg1, arg2) else: assert False + elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: + if len(self.args) != 2: + raise ArgumentMismatch + arg = self.args[1].execute(interp) + if self.name == 'view': + w_res = arr.descr_view(interp.space, arg) + else: + assert False else: raise WrongFunctionName if isinstance(w_res, W_NDimArray): @@ -652,8 +704,14 @@ if token.name == 'identifier': if tokens.remaining() and tokens.get(0).name == 'paren_left': stack.append(self.parse_function_call(token.v, tokens)) + elif token.v.strip(' ') == 'ndarray': + stack.append(ArrayClass()) + elif token.v.strip(' ') == 'int': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'float': + stack.append(DtypeClass('float')) else: - stack.append(Variable(token.v)) + stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': stack.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'operator': diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = 
shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -131,7 +131,8 @@ arr.get_dtype()) if _axis < 0 or len(arr.get_shape()) <= _axis: raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) - res = W_NDimArray.from_shape(shape, dtype, 'C') + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: @@ -139,7 +140,7 @@ continue chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, arr.get_shape()[_axis]) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) axis_start += arr.get_shape()[_axis] return res @@ -150,22 +151,22 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(res).implementation.setslice(space, arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(res).implementation.setslice(space, arr) - return res + Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) + return w_res def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) @@ -261,7 +262,7 @@ else: shape = (shape[:axis2] + shape[axis2 + 1:axis1] + shape[axis1 + 1:] + [size]) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) if size == 0: return out if shapelen == 2: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,7 +41,7 @@ dtype = w_arr_list[0].get_dtype() for w_arr in w_arr_list[1:]: dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) - out = base.W_NDimArray.from_shape(shape, dtype) + out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -64,8 +64,8 @@ base_iter.next_skip_x(start) if length == 1: return base_iter.getitem() - res = W_NDimArray.from_shape([length], base.get_dtype(), - base.get_order()) + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ 
b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w + ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -85,8 +85,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(res_shape, self.get_dtype()) - return loop.getitem_filter(res, self, arr) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): @@ -145,12 +145,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return chunks.apply(self) + return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(shape, self.get_dtype(), self.get_order()) - if not res.get_size(): - return res - return loop.getitem_array_int(space, self, res, iter_shape, indexes, + w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), w_instance=self) + if not w_res.get_size(): + return w_res + return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, prefix) def setitem_array_int(self, space, w_index, w_value): @@ -161,7 +162,7 @@ # w_index is a list of slices w_value = convert_to_array(space, w_value) chunks = self.implementation._prepare_slice_args(space, w_index) - view = chunks.apply(self) + view = chunks.apply(space, self) view.implementation.setslice(space, w_value) return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, @@ -259,14 +260,17 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return W_NDimArray(self.implementation.copy(space)) + copy = self.implementation.copy(space) + w_subtype = space.type(self) + return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - return W_NDimArray(self.implementation.get_real(self)) + return wrap_impl(space, space.type(self), self, + self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return W_NDimArray(ret) + return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -298,7 +302,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return W_NDimArray(new_impl) + return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -326,7 +330,7 @@ """ if self.is_scalar(): return self - return self.implementation.swapaxes(self, axis1, axis2) + return self.implementation.swapaxes(space, self, axis1, axis2) def descr_tolist(self, space): if len(self.get_shape()) == 0: @@ -446,17 +450,24 @@ # we must do that, because we need a working set. otherwise # we would modify the array in-place. Use this to our advantage # by converting nonnative byte order. 
+ if self.is_scalar(): + return space.wrap(0) s = self.get_dtype().name if not self.get_dtype().native: s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) - return contig.implementation.argsort(space, w_axis) + return contig.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return self.implementation.astype(space, dtype) + impl = self.implementation + if isinstance(impl, scalar.Scalar): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -471,9 +482,9 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(self.get_shape(), self.get_dtype()) - loop.byteswap(self.implementation, res.implementation) - return res + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + loop.byteswap(self.implementation, w_res.implementation) + return w_res @unwrap_spec(mode=str) def descr_choose(self, space, w_choices, w_out=None, mode='raise'): @@ -564,7 +575,7 @@ if space.is_none(w_out): if self.get_dtype().is_bool_type(): #numpy promotes bool.round() to float16. Go figure. - w_out = W_NDimArray.from_shape(self.get_shape(), + w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) else: w_out = None @@ -578,6 +589,8 @@ else: calc_dtype = out.get_dtype() + if decimals == 0: + out = out.descr_view(space,space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -619,9 +632,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if w_type is not None: - raise OperationError(space.w_NotImplementedError, space.wrap( - "view(... type=) not implemented yet")) + if not w_type and w_dtype: + try: + if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + w_type = w_dtype + w_dtype = None + except (OperationError, TypeError): + pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -651,8 +668,9 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - return W_NDimArray(impl.get_view(self, dtype, new_shape)) - + v = impl.get_view(self, dtype, new_shape) + w_ret = wrap_impl(space, w_type, self, v) + return w_ret # --------------------- operations ---------------------------- @@ -760,9 +778,9 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? 
out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(out_shape, dtype) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, result, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) @unwrap_spec(w_axis = WrappedDefault(None)) @@ -884,14 +902,20 @@ isfortran = space.getitem(w_state, space.wrap(3)) storage = space.getitem(w_state, space.wrap(4)) - self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + self.implementation = W_NDimArray.from_shape_and_storage(space, + [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation + def descr___array_finalize__(self, space, w_obj): + pass - at unwrap_spec(offset=int) + at unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, w_order=None): + offset=0, w_strides=None, order='C'): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.support import calc_strides if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_order) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) @@ -900,10 +924,19 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype) - return W_NDimArray.from_shape(shape, dtype) + if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): + return W_NDimArray.from_shape(space, shape, dtype, order) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = ConcreteArray(shape, dtype.base, order, strides, + backstrides) + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_function(space.getattr(w_ret, + space.wrap('__array_finalize__')), w_subtype) + return w_ret @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. 
@@ -912,9 +945,17 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + if w_subtype: + if not space.isinstance_w(w_subtype, space.w_type): + raise OperationError(space.w_ValueError, space.wrap( + "subtype must be a subtype of ndarray, not a class instance")) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + 'C', False, w_subtype) + else: + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", @@ -1042,6 +1083,7 @@ W_NDimArray.fdel___pypy_data__), __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), + __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) @@ -1094,12 +1136,12 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape - arr = W_NDimArray.from_shape(shape, dtype, order=order) - arr_iter = arr.create_iter() + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) arr_iter.next() - return arr + return w_arr @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): @@ -1109,7 +1151,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order)) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1119,10 +1161,10 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - arr.fill(one) - return space.wrap(arr) + w_arr.fill(one) + return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -50,7 +50,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([num_items], dtype=dtype) + a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) ai = a.create_iter() for val in items: ai.setitem(val) @@ -71,7 +71,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([count], dtype=dtype) + a = W_NDimArray.from_shape(space, [count], dtype=dtype) loop.fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -181,7 +181,8 @@ temp_shape = obj_shape[:axis] + obj_shape[axis + 1:] if out: dtype = 
out.get_dtype() - temp = W_NDimArray.from_shape(temp_shape, dtype) + temp = W_NDimArray.from_shape(space, temp_shape, dtype, + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -207,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -216,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape([obj.get_size()], dtype) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out @@ -295,7 +296,7 @@ return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - return loop.call1(shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) @@ -370,7 +371,7 @@ return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - return loop.call2(new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) @@ -450,7 +451,7 @@ return dt2 return dt1 return dt2 - else: + else: # increase to the next signed type dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] @@ -532,7 +533,13 @@ return current_guess if current_guess is complex_type: return complex_type - return interp_dtype.get_dtype_cache(space).w_float64dtype + if space.isinstance_w(w_obj, space.w_float): + return float_type + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype + raise operationerrfmt(space.w_NotImplementedError, + 'unable to create dtype from objects, ' '"%T" instance not supported', + w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -58,11 +58,11 @@ def __init__(self, name): self.name = name - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(arr.start + ofs, arr.get_strides(), + return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), arr.get_backstrides(), arr.shape, arr, orig_arr, subdtype) @@ -81,13 +81,13 @@ assert s >= 0 return shape[:] + old_shape[s:] - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation shape = self.extend_shape(arr.shape) r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), self.l) _, start, strides, backstrides = r - return W_NDimArray.new_slice(start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, orig_arr) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,9 +19,34 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) -def call2(shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): +def call2(space, shape, func, 
calc_dtype, res_dtype, w_lhs, w_rhs, out): + # handle array_priority + # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: + # 1. if __array_priorities__ are equal and one is an ndarray and the + # other is a subtype, flip the order + # 2. elif rhs.__array_priority__ is higher, flip the order + # Now return the subtype of the first one + + w_ndarray = space.gettypefor(W_NDimArray) + lhs_type = space.type(w_lhs) + rhs_type = space.type(w_rhs) + lhs_for_subtype = w_lhs + rhs_for_subtype = w_rhs + #it may be something like a FlatIter, which is not an ndarray + if not space.is_true(space.issubtype(lhs_type, w_ndarray)): + lhs_type = space.type(w_lhs.base) + lhs_for_subtype = w_lhs.base + if not space.is_true(space.issubtype(rhs_type, w_ndarray)): + rhs_type = space.type(w_rhs.base) + rhs_for_subtype = w_rhs.base + if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): + lhs_for_subtype = rhs_for_subtype + + # TODO handle __array_priorities__ and maybe flip the order + if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, + w_instance=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -48,9 +73,9 @@ reds = ['shape', 'w_obj', 'out', 'obj_iter', 'out_iter']) -def call1(shape, func, calc_dtype, res_dtype, w_obj, out): +def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) @@ -437,12 +462,12 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape([1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - res_str.implementation.get_storage_as_int(space)) + w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): - res_str.implementation.setitem(0, iter.getitem()) + w_res_str.implementation.setitem(0, iter.getitem()) for i in range(itemsize): builder.append(res_str_casted[i]) iter.next() diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -2,7 +2,7 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, - FunctionCall, FakeSpace) + FunctionCall, FakeSpace, W_NDimArray) class TestCompiler(object): @@ -84,6 +84,7 @@ assert interp.code.statements[0] == Assignment( 'a', Operator(Variable('b'), "+", FloatConstant(3))) + class TestRunner(object): def run(self, code): interp = numpy_compile(code) @@ -290,4 +291,32 @@ ''') assert interp.results[0].real == 0 assert interp.results[0].imag == 1 - + + def test_view_none(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = None + c = view(a, b) + c -> 0 + ''') + assert interp.results[0].value == 1 + + def test_view_ndarray(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = ndarray + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + + def test_view_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 
0] + b = int + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -975,3 +975,16 @@ assert a[0] == 1 assert (a + a)[1] == 4 +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpypy import array + class Polynomial(object): + pass + try: + a = array(Polynomial()) + assert a.shape == () + except NotImplementedError, e: + if e.message.find('unable to create dtype from objects')>=0: + skip('creating ojbect dtype not supported yet') + + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,8 +24,8 @@ def get_size(self): return 1 -def create_slice(a, chunks): - return Chunks(chunks).apply(W_NDimArray(a)).implementation +def create_slice(space, a, chunks): + return Chunks(chunks).apply(space, W_NDimArray(a)).implementation def create_array(*args, **kwargs): @@ -46,100 +46,100 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] - a = create_array([1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] assert s.backstrides == [12, 2] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert 
s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -207,7 +207,8 @@ raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) # dtypes = get_dtype_cache(self.space) - w_array = W_NDimArray.from_shape_and_storage([2, 2], storage, dtypes.w_int8dtype) + w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2], + storage, dtypes.w_int8dtype) def get(i, j): return w_array.getitem(self.space, [i, j]).value assert get(0, 0) == 0 @@ -1442,7 +1443,7 @@ assert x.view('int8').shape == (10, 3) def test_ndarray_view_empty(self): - from numpypy import array, int8, int16, dtype + from numpypy import array, int8, int16 x = array([], dtype=[('a', int8), ('b', int8)]) y = x.view(dtype=int16) @@ -2877,6 +2878,12 @@ assert y[0, 1] == 2 y[0, 1] = 42 assert x[1] == 42 + class C(ndarray): + pass + z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C) + assert isinstance(z, C) + assert z.shape == (4, 1) + assert 
z[1, 0] == 42 def test___pypy_data__(self): from numpypy import array @@ -2891,7 +2898,7 @@ class AppTestLongDoubleDtypes(BaseNumpyAppTest): def setup_class(cls): from pypy.module.micronumpy import Module - print dir(Module.interpleveldefs) + #print dir(Module.interpleveldefs) if not Module.interpleveldefs.get('longfloat', None): py.test.skip('no longdouble types yet') BaseNumpyAppTest.setup_class.im_func(cls) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -0,0 +1,223 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestSupport(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_NoNew = cls.space.appexec([], '''(): + from numpypy import ndarray + class NoNew(ndarray): + def __new__(cls, subtype): + raise ValueError('should not call __new__') + def __array_finalize__(self, obj): + + self.called_finalize = True + return NoNew ''') + cls.w_SubType = cls.space.appexec([], '''(): + from numpypy import ndarray, asarray + class SubType(ndarray): + def __new__(obj, input_array): + obj = asarray(input_array).view(obj) + obj.called_new = True + return obj + def __array_finalize__(self, obj): + self.called_finalize = True + return SubType ''') + + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + a = a.reshape(1, 4) + b = a.reshape(4, 1) + assert isinstance(b, C) + #make sure __new__ was not called + assert not getattr(b, 'id', None) + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 + + def test_subtype_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + assert isinstance(b, matrix) + assert (b == a).all() + + + def test_finalize(self): + #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray + import numpypy as np + class InfoArray(np.ndarray): + def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + strides=None, order='C', info=None): + obj = np.ndarray.__new__(subtype, shape, dtype, buffer, + offset, strides, order) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + print 'finalize with None' + return + # printing the object itself will crash the test + print 'finalize with something',type(obj) + self.info = getattr(obj, 'info', None) + obj = InfoArray(shape=(3,)) + assert isinstance(obj, InfoArray) + assert obj.info is None + obj = InfoArray(shape=(3,), info='information') + assert obj.info == 'information' + v = obj[1:] + assert isinstance(v, InfoArray) + assert v.base is obj + assert v.info == 'information' + arr = np.arange(10) + cast_arr = arr.view(InfoArray) + assert isinstance(cast_arr, InfoArray) + assert cast_arr.base is arr + assert cast_arr.info is None + + def test_sub_where(self): + from numpypy import where, ones, zeros, array + a = array([1, 2, 3, 0, -3]) + v = 
a.view(self.NoNew) + b = where(array(v) > 0, ones(5), zeros(5)) + assert (b == [1, 1, 1, 0, 0]).all() + # where returns an ndarray irregardless of the subtype of v + assert not isinstance(b, self.NoNew) + + def test_sub_repeat(self): + from numpypy import repeat, array + a = self.SubType(array([[1, 2], [3, 4]])) + b = repeat(a, 3) + assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() + assert isinstance(b, self.SubType) + + def test_sub_flatiter(self): + from numpypy import array + a = array(range(9)).reshape(3, 3).view(self.NoNew) + c = array(range(9)).reshape(3, 3) + assert isinstance(a.flat[:] + a.flat[:], self.NoNew) + assert isinstance(a.flat[:] + c.flat[:], self.NoNew) + assert isinstance(c.flat[:] + a.flat[:], self.NoNew) + assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) + + def test_sub_getitem_filter(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + c = b[array([False, True, False, True, False])] + assert c.shape == (2,) + assert (c == [1, 3]).all() + assert isinstance(c, self.SubType) + assert b.called_new + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_getitem_array_int(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + assert b.called_new + c = b[array([3, 2, 1, 4])] + assert (c == [3, 2, 1, 4]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_round(self): + from numpypy import array + a = array(range(10), dtype=float).view(self.NoNew) + # numpy compatibility + b = a.round(decimals=0) + assert isinstance(b, self.NoNew) + b = a.round(decimals=1) + assert not isinstance(b, self.NoNew) + b = a.round(decimals=-1) + assert not isinstance(b, self.NoNew) + + def test_sub_dot(self): + # the returned type is that of the first argument + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = array(range(12)).reshape(4,3).view(self.SubType) + d = c.dot(a) + assert isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert d.called_finalize + d = a.dot(c) + assert not isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert not getattr(d, 'called_finalize', False) + + def test_sub_reduce(self): + # i.e. sum, max + # test for out as well + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = b.sum(axis=0) + assert (c == [12, 15, 18, 21]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + d = array(range(4)) + c = b.sum(axis=0, out=d) + assert c is d + assert not isinstance(c, self.SubType) + d = array(range(4)).view(self.NoNew) + c = b.sum(axis=0, out=d) + assert c is d + assert isinstance(c, self.NoNew) + + def test_sub_call2(self): + # c + a vs. a + c, what about array priority? 
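(An aside on the "array priority" question in the comment above: plain NumPy resolves it through __array_priority__ and __array_wrap__, while the call2() code earlier in this series simply keeps the subtype of the first operand and only flips when the left-hand side is a plain ndarray. A minimal sketch of the NumPy behaviour being mirrored; the HighPriority class is made up for the illustration and runs against numpy rather than numpypy:

import numpy as np

class HighPriority(np.ndarray):
    # larger than the 0.0 default of plain ndarray, so this subtype gets to
    # wrap the result of mixed binary operations regardless of operand order
    __array_priority__ = 10.0

a = np.arange(3).view(HighPriority)
b = np.arange(3)
assert isinstance(a + b, HighPriority)
assert isinstance(b + a, HighPriority)
)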
+ from numpypy import array + a = array(range(12)).view(self.NoNew) + b = self.SubType(range(12)) + c = b + a + assert isinstance(c, self.SubType) + c = a + b + assert isinstance(c, self.NoNew) + d = range(12) + e = a - d + assert isinstance(e, self.NoNew) + + def test_sub_call1(self): + from numpypy import array, sqrt + a = array(range(12)).view(self.NoNew) + b = sqrt(a) + assert b.called_finalize == True + + def test_sub_astype(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.astype(float) + assert b.called_finalize == True + + def test_sub_reshape(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.reshape(3, 4) + assert b.called_finalize == True + diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -1,6 +1,8 @@ +import py import re from rpython.tool.logparser import extract_category +from rpython.jit.backend.tool.viewcode import ObjdumpNotFound from pypy.tool.jitlogparser.parser import (import_log, parse_log_counts, mangle_descr) @@ -41,7 +43,10 @@ lib_re = re.compile("file '.*lib-python.*'") for loop in loops: if hasattr(loop, 'force_asm'): - loop.force_asm() + try: + loop.force_asm() + except ObjdumpNotFound: + py.test.skip("ObjDump was not found, skipping") if lib_re.search(loop.comment) or \ lib_re.search(loop.operations[0].repr()): # do not care for _optimize_charset or _mk_bitmap @@ -60,7 +65,7 @@ by_count = lambda l: -l.count is_prime_loops.sort(key=by_count) fn_with_bridges_loops.sort(key=by_count) - + # check that we can find bridges corresponding to " % 3" and " % 5" mod_bridges = [] for op in fn_with_bridges_loops[0].operations: @@ -69,7 +74,7 @@ if bridge is not None: mod_bridges.append(bridge) assert len(mod_bridges) in (1, 2) - + # check that counts are reasonable (precise # may change in the future) assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -92,8 +92,8 @@ space = self.space w_type = self.unerase(w_dict.dstorage) if not w_type.is_heaptype(): - msg = "can't clear dictionary of type '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_type.name) + msg = "can't clear dictionary of type '%N'" + raise operationerrfmt(space.w_TypeError, msg, w_type) w_type.dict_w.clear() w_type.mutated(None) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -263,8 +263,8 @@ def setdictvalue(w_self, space, name, w_value): if not w_self.is_heaptype(): - msg = "can't set attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_self.name) + msg = "can't set attributes on type object '%N'" + raise operationerrfmt(space.w_TypeError, msg, w_self) if name == "__del__" and name not in w_self.dict_w: msg = ("a __del__ method added to an existing type will not be " "called") @@ -287,8 +287,8 @@ if w_self.lazyloaders: w_self._cleanup_() # force un-lazification if not w_self.is_heaptype(): - msg = "can't delete attributes on type object '%s'" - raise operationerrfmt(space.w_TypeError, msg, w_self.name) + msg = "can't delete attributes on type object '%N'" + raise operationerrfmt(space.w_TypeError, msg, w_self) try: del 
w_self.dict_w[key] except KeyError: @@ -415,12 +415,12 @@ w_subtype) if not w_subtype.issubtype(w_self): raise operationerrfmt(space.w_TypeError, - "%s.__new__(%s): %s is not a subtype of %s", - w_self.name, w_subtype.name, w_subtype.name, w_self.name) + "%N.__new__(%N): %N is not a subtype of %N", + w_self, w_subtype, w_subtype, w_self) if w_self.instancetypedef is not w_subtype.instancetypedef: raise operationerrfmt(space.w_TypeError, - "%s.__new__(%s) is not safe, use %s.__new__()", - w_self.name, w_subtype.name, w_subtype.name) + "%N.__new__(%N) is not safe, use %N.__new__()", + w_self, w_subtype, w_subtype) return w_subtype def _cleanup_(w_self): @@ -628,7 +628,7 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, - "can't set %s.__name__", w_type.name) + "can't set %N.__name__", w_type) w_type.name = space.str_w(w_value) def descr_get__mro__(space, w_type): @@ -660,16 +660,16 @@ w_type = _check(space, w_type) if not w_type.is_heaptype(): raise operationerrfmt(space.w_TypeError, - "can't set %s.__bases__", w_type.name) + "can't set %N.__bases__", w_type) if not space.isinstance_w(w_value, space.w_tuple): raise operationerrfmt(space.w_TypeError, - "can only assign tuple to %s.__bases__, not %T", - w_type.name, w_value) + "can only assign tuple to %N.__bases__, not %T", + w_type, w_value) newbases_w = space.fixedview(w_value) if len(newbases_w) == 0: raise operationerrfmt(space.w_TypeError, - "can only assign non-empty tuple to %s.__bases__, not ()", - w_type.name) + "can only assign non-empty tuple to %N.__bases__, not ()", + w_type) for w_newbase in newbases_w: if isinstance(w_newbase, W_TypeObject): @@ -886,9 +886,9 @@ "only classic bases")) if not w_bestbase.instancetypedef.acceptable_as_base_class: raise operationerrfmt(space.w_TypeError, - "type '%s' is not an " + "type '%N' is not an " "acceptable base class", - w_bestbase.instancetypedef.name) + w_bestbase) # check that all other bases' layouts are superclasses of the bestbase w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase @@ -1142,8 +1142,8 @@ if w_descr is not None: return space.get(w_descr, w_type) raise operationerrfmt(space.w_AttributeError, - "type object '%s' has no attribute '%s'", - w_type.name, name) + "type object '%N' has no attribute %R", + w_type, w_name) # ____________________________________________________________ diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -36,6 +36,9 @@ if sys.platform == "win32": pass # lots more in Psyco +class ObjdumpNotFound(Exception): + pass + def find_objdump(): exe = ('objdump', 'gobjdump') path = os.environ['PATH'].split(os.pathsep) @@ -45,7 +48,7 @@ if not os.path.exists(path_to): continue return e - raise AssertionError('(g)objdump was not found in PATH') + raise ObjdumpNotFound('(g)objdump was not found in PATH') def machine_code_dump(data, originaddr, backend_name, label_list=None): objdump_backend_option = { @@ -326,7 +329,7 @@ color = "black" else: color = "red" - g1.emit_edge('N_%x' % r.addr, 'N_%x' % targetaddr, + g1.emit_edge('N_%x' % r.addr, 'N_%x' % targetaddr, color=color) sys.stdout.flush() if showgraph: diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -74,7 +74,8 @@ is_rpython_class, has_custom_trace, get_custom_trace, - fast_path_tracing): + fast_path_tracing, + has_gcptr): self.getfinalizer = 
getfinalizer self.getlightfinalizer = getlightfinalizer self.is_varsize = is_varsize @@ -92,6 +93,7 @@ self.has_custom_trace = has_custom_trace self.get_custom_trace = get_custom_trace self.fast_path_tracing = fast_path_tracing + self.has_gcptr = has_gcptr def get_member_index(self, type_id): return self.member_index(type_id) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1511,6 +1511,7 @@ # replace the old object's content with the target address. # A bit of no-ops to convince llarena that we are changing # the layout, in non-translated versions. + typeid = self.get_type_id(obj) obj = llarena.getfakearenaaddress(obj) llarena.arena_reset(obj - size_gc_header, totalsize, 0) llarena.arena_reserve(obj - size_gc_header, @@ -1526,7 +1527,9 @@ # because it can contain further pointers to other young objects. # We will fix such references to point to the copy of the young # objects when we walk 'old_objects_pointing_to_young'. - self.old_objects_pointing_to_young.append(newobj) + if self.has_gcptr(typeid): + # we only have to do it if we have any gcptrs + self.old_objects_pointing_to_young.append(newobj) _trace_drag_out._always_inline_ = True def _visit_young_rawmalloced_object(self, obj): @@ -1815,6 +1818,8 @@ # # It's the first time. We set the flag. hdr.tid |= GCFLAG_VISITED + if not self.has_gcptr(llop.extract_ushort(llgroup.HALFWORD, hdr.tid)): + return # # Trace the content of the object and put all objects it references # into the 'objects_to_trace' list. diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -75,6 +75,10 @@ infobits = self.get(typeid).infobits return (infobits & T_HAS_GCPTR_IN_VARSIZE) != 0 + def q_has_gcptr(self, typeid): + infobits = self.get(typeid).infobits + return (infobits & T_HAS_GCPTR) != 0 + def q_is_gcarrayofgcptr(self, typeid): infobits = self.get(typeid).infobits return (infobits & T_IS_GCARRAY_OF_GCPTR) != 0 @@ -162,7 +166,8 @@ self.q_is_rpython_class, self.q_has_custom_trace, self.q_get_custom_trace, - self.q_fast_path_tracing) + self.q_fast_path_tracing, + self.q_has_gcptr) # the lowest 16bits are used to store group member index @@ -175,7 +180,8 @@ T_HAS_FINALIZER = 0x200000 T_HAS_CUSTOM_TRACE = 0x400000 T_HAS_LIGHTWEIGHT_FINALIZER = 0x800000 -T_KEY_MASK = intmask(0xFF000000) +T_HAS_GCPTR = 0x1000000 +T_KEY_MASK = intmask(0xFE000000) # bug detection only T_KEY_VALUE = intmask(0x5A000000) # bug detection only def _check_valid_type_info(p): @@ -250,6 +256,8 @@ infobits |= T_IS_WEAKREF if is_subclass_of_object(TYPE): infobits |= T_IS_RPYTHON_INSTANCE + if infobits | T_HAS_GCPTR_IN_VARSIZE or offsets: + infobits |= T_HAS_GCPTR info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ From noreply at buildbot.pypy.org Sun Jul 28 01:30:39 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 28 Jul 2013 01:30:39 +0200 (CEST) Subject: [pypy-commit] pypy default: have BUILD_LIST_FROM_ARG utilize length_hint Message-ID: <20130727233039.DD4E41C11A9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r65726:330436db9f45 Date: 2013-07-27 15:53 -0700 http://bitbucket.org/pypy/pypy/changeset/330436db9f45/ Log: have BUILD_LIST_FROM_ARG utilize length_hint diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- 
a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -896,6 +896,21 @@ assert False, (3,) except AssertionError, e: assert str(e) == "(3,)" + + # BUILD_LIST_FROM_ARG is PyPy specific + @py.test.mark.skipif('config.option.runappdirect') + def test_build_list_from_arg_length_hint(self): + hint_called = [False] + class Foo(object): + def __length_hint__(self): + hint_called[0] = True + return 5 + def __iter__(self): + for i in range(5): + yield i + l = [a for a in Foo()] + assert hint_called[0] + assert l == list(range(5)) class TestOptimizations: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -707,16 +707,17 @@ self.pushvalue(w_list) def BUILD_LIST_FROM_ARG(self, _, next_instr): + space = self.space # this is a little dance, because list has to be before the # value last_val = self.popvalue() + length_hint = 0 try: - lgt = self.space.len_w(last_val) - except OperationError, e: - if e.async(self.space): + length_hint = space.length_hint(last_val, length_hint) + except OperationError as e: + if e.async(space): raise - lgt = 0 # oh well - self.pushvalue(self.space.newlist([], sizehint=lgt)) + self.pushvalue(space.newlist([], sizehint=length_hint)) self.pushvalue(last_val) def LOAD_ATTR(self, nameindex, next_instr): From noreply at buildbot.pypy.org Sun Jul 28 01:48:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 28 Jul 2013 01:48:52 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: hg merge default Message-ID: <20130727234852.7449F1C142B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-ootype Changeset: r65727:e9c8f13a047b Date: 2013-07-28 00:46 +0100 http://bitbucket.org/pypy/pypy/changeset/e9c8f13a047b/ Log: hg merge default diff too long, truncating to 2000 out of 3109 lines diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. 
- Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. 
+ +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +Highlights +========== + +* Support for os.statvfs and os.fstatvfs on unix systems. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + +* `distutils`_: copy CPython's implementation of customize_compiler, dont call + split on environment variables, honour CFLAGS, CPPFLAGS, LDSHARED and + LDFLAGS. + +* During packaging, compile the CFFI tk extension. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 +.. _`distutils`: https://bitbucket.org/pypy/pypy/src/0c6eeae0316c11146f47fcf83e21e24f11378be1/?at=distutils-cppldflags + + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +How to use PyPy? +================ + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy +.. _`PyPy and pip`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +The PyPy Team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,18 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + +.. branch: add-statvfs +Added os.statvfs and os.fstatvfs + +.. branch: statvfs_tests +Added some addition tests for statvfs. + +.. branch: ndarray-subtype +Allow subclassing ndarray, i.e. matrix diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -556,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. 
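The reason a plain os.getenv() is not enough for this PYTHONINSPECT check (and why the whatsnew entry above adds __pypy__.os.real_getenv) is that os.getenv only consults the os.environ snapshot taken at interpreter startup, whereas os.putenv writes straight to the C-level environment. A minimal sketch of the difference, assuming a POSIX libc reachable via ctypes and a variable name (DEMO_VAR) that is not already set:

import os, ctypes, ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))
libc.getenv.restype = ctypes.c_char_p

os.putenv('DEMO_VAR', '42')               # updates the process environment
assert os.getenv('DEMO_VAR') is None      # but the cached os.environ misses it
assert libc.getenv('DEMO_VAR') == '42'    # the OS-level value is visible

The real_getenv import just below sidesteps exactly this cache.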
# + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -896,6 +896,21 @@ assert False, (3,) except AssertionError, e: assert str(e) == "(3,)" + + # BUILD_LIST_FROM_ARG is PyPy specific + @py.test.mark.skipif('config.option.runappdirect') + def test_build_list_from_arg_length_hint(self): + hint_called = [False] + class Foo(object): + def __length_hint__(self): + hint_called[0] = True + return 5 + def __iter__(self): + for i in range(5): + yield i + l = [a for a in Foo()] + assert hint_called[0] + assert l == list(range(5)) class TestOptimizations: diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -414,8 +414,8 @@ closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): raise operationerrfmt(space.w_ValueError, - "%s() requires a code object with %d free vars, not %d", - self.name, closure_len, len(code.co_freevars)) + "%N() requires a code object with %d free vars, not %d", + self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -482,7 +482,6 @@ space.abstract_isinstance_w(w_firstarg, self.w_class)): pass # ok else: - myname = self.getname(space, "") clsdescr = self.w_class.getname(space, "") if clsdescr: clsdescr += " instance" @@ -497,10 +496,10 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %s() must be called with %s " + msg = ("unbound method %N() must be called with %s " "as first argument (got %s instead)") raise operationerrfmt(space.w_TypeError, msg, - myname, clsdescr, instdescr) + self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -707,16 +707,17 @@ self.pushvalue(w_list) def BUILD_LIST_FROM_ARG(self, _, next_instr): + space = self.space # this is a little dance, because list has to be before the # value last_val = self.popvalue() + length_hint = 0 try: - lgt = self.space.len_w(last_val) - except OperationError, e: - if e.async(self.space): + length_hint = space.length_hint(last_val, length_hint) + except OperationError as e: + if e.async(space): raise - lgt = 0 # oh well - self.pushvalue(self.space.newlist([], sizehint=lgt)) + self.pushvalue(space.newlist([], sizehint=length_hint)) self.pushvalue(last_val) def LOAD_ATTR(self, nameindex, next_instr): diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script 
= getscript(""" @@ -706,6 +706,20 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +985,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -553,9 +553,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%s' for '%s' objects doesn't apply to '%T' object" + m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" raise operationerrfmt(space.w_TypeError, m, - self.name, self.w_cls.name, w_obj) + self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from 
rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -324,8 +324,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_ffi/test/test_type_converter.py @@ -150,7 +150,7 @@ return self.do_and_wrap(w_ffitype) -class TestFromAppLevel(object): +class TestToAppLevel(object): spaceconfig = dict(usemodules=('_ffi',)) def setup_class(cls): diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -140,6 +140,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -219,8 +224,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError, e: print e - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: raise AssertionError("did not fail??") diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = 
chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,15 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) + else: + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + assert 
isinstance(w_ret, W_NDimArray) + space.call_method(w_ret, '__array_finalize__', w_instance) + return w_ret class ArrayArgumentException(Exception): pass @@ -20,10 +29,11 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +42,12 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,15 +58,20 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if w_subtype: + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, '__array_finalize__', w_subtype) + return w_ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,14 +186,23 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + return BoolObject(True) + def type(self, w_obj): - return w_obj.tp + if self.is_none(w_obj): + return self.w_None + try: + return w_obj.tp + except AttributeError: + if isinstance(w_obj, W_NDimArray): + return W_NDimArray + return self.w_None def gettypefor(self, w_obj): return None @@ -199,6 +210,11 @@ def call_function(self, tp, w_dtype): return w_dtype + def call_method(self, w_obj, s, *args): + # XXX even the hacks have hacks + return None + #return getattr(w_obj, 'descr_' + s)(self, *args) + @specialize.arg(1) def interp_w(self, tp, what): assert isinstance(what, tp) @@ -329,6 +345,8 @@ self.name = name.strip(" ") def execute(self, interp): + if self.name == 'None': + return None return interp.variables[self.name] def __repr__(self): @@ -451,6 +469,32 @@ def __repr__(self): return 'slice(%s,%s,%s)' % (self.start, self.stop, self.step) +class ArrayClass(Node): + def 
__init__(self): + self.v = W_NDimArray + + def execute(self, interp): + return self.v + + def __repr__(self): + return '' + +class DtypeClass(Node): + def __init__(self, dt): + self.v = dt + + def execute(self, interp): + if self.v == 'int': + dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'float': + dtype = get_dtype_cache(interp.space).w_float64dtype + else: + raise BadToken('unknown v to dtype "%s"' % self.v) + return dtype + + def __repr__(self): + return '' % self.v + class Execute(Node): def __init__(self, expr): self.expr = expr @@ -533,6 +577,14 @@ w_res = where(interp.space, arr, arg1, arg2) else: assert False + elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: + if len(self.args) != 2: + raise ArgumentMismatch + arg = self.args[1].execute(interp) + if self.name == 'view': + w_res = arr.descr_view(interp.space, arg) + else: + assert False else: raise WrongFunctionName if isinstance(w_res, W_NDimArray): @@ -652,8 +704,14 @@ if token.name == 'identifier': if tokens.remaining() and tokens.get(0).name == 'paren_left': stack.append(self.parse_function_call(token.v, tokens)) + elif token.v.strip(' ') == 'ndarray': + stack.append(ArrayClass()) + elif token.v.strip(' ') == 'int': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'float': + stack.append(DtypeClass('float')) else: - stack.append(Variable(token.v)) + stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': stack.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'operator': diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -88,7 +88,7 @@ y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(out, shape, arr, x, y, dtype) def dot(space, w_obj1, w_obj2): @@ -131,7 +131,8 @@ arr.get_dtype()) if _axis < 0 or len(arr.get_shape()) <= _axis: raise operationerrfmt(space.w_IndexError, "axis %d out of bounds [0, %d)", axis, len(shape)) - res = W_NDimArray.from_shape(shape, dtype, 'C') + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: @@ -139,7 +140,7 @@ continue chunks[_axis] = Chunk(axis_start, axis_start + arr.get_shape()[_axis], 1, arr.get_shape()[_axis]) - Chunks(chunks).apply(res).implementation.setslice(space, arr) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) axis_start += arr.get_shape()[_axis] return res @@ -150,22 +151,22 @@ arr = arr.descr_flatten(space) orig_size = arr.get_shape()[0] shape = [arr.get_shape()[0] * repeats] - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in range(repeats): Chunks([Chunk(i, shape[0] - repeats + i, repeats, - orig_size)]).apply(res).implementation.setslice(space, arr) + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] chunks = [Chunk(0, i, 1, i) for i in shape] orig_size = shape[axis] shape[axis] *= repeats - res = W_NDimArray.from_shape(shape, arr.get_dtype()) + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) for i in 
range(repeats): chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) - Chunks(chunks).apply(res).implementation.setslice(space, arr) - return res + Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) + return w_res def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) @@ -261,7 +262,7 @@ else: shape = (shape[:axis2] + shape[axis2 + 1:axis1] + shape[axis1 + 1:] + [size]) - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) if size == 0: return out if shapelen == 2: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,7 +41,7 @@ dtype = w_arr_list[0].get_dtype() for w_arr in w_arr_list[1:]: dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) - out = base.W_NDimArray.from_shape(shape, dtype) + out = base.W_NDimArray.from_shape(space, shape, dtype) return out diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -64,8 +64,8 @@ base_iter.next_skip_x(start) if length == 1: return base_iter.getitem() - res = W_NDimArray.from_shape([length], base.get_dtype(), - base.get_order()) + res = W_NDimArray.from_shape(space, [length], base.get_dtype(), + base.get_order(), w_instance=base) return loop.flatiter_getitem(res, base_iter, step) def descr_setitem(self, space, w_idx, w_value): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -3,7 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w + ArrayArgumentException, issequence_w, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -85,8 +85,8 @@ res_shape = [size] + self.get_shape()[1:] else: res_shape = [size] - res = W_NDimArray.from_shape(res_shape, self.get_dtype()) - return loop.getitem_filter(res, self, arr) + w_res = W_NDimArray.from_shape(space, res_shape, self.get_dtype(), w_instance=self) + return loop.getitem_filter(w_res, self, arr) def setitem_filter(self, space, idx, val): if len(idx.get_shape()) > 1 and idx.get_shape() != self.get_shape(): @@ -145,12 +145,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return chunks.apply(self) + return chunks.apply(space, self) shape = res_shape + self.get_shape()[len(indexes):] - res = W_NDimArray.from_shape(shape, self.get_dtype(), self.get_order()) - if not res.get_size(): - return res - return loop.getitem_array_int(space, self, res, iter_shape, indexes, + w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), + self.get_order(), w_instance=self) + if not w_res.get_size(): + return w_res + return loop.getitem_array_int(space, self, w_res, iter_shape, indexes, prefix) def setitem_array_int(self, space, w_index, w_value): @@ -161,7 +162,7 @@ # w_index is a list of slices w_value = convert_to_array(space, 
w_value) chunks = self.implementation._prepare_slice_args(space, w_index) - view = chunks.apply(self) + view = chunks.apply(space, self) view.implementation.setslice(space, w_value) return loop.setitem_array_int(space, self, iter_shape, indexes, val_arr, @@ -259,14 +260,17 @@ return self.implementation.get_scalar_value() def descr_copy(self, space): - return W_NDimArray(self.implementation.copy(space)) + copy = self.implementation.copy(space) + w_subtype = space.type(self) + return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - return W_NDimArray(self.implementation.get_real(self)) + return wrap_impl(space, space.type(self), self, + self.implementation.get_real(self)) def descr_get_imag(self, space): ret = self.implementation.get_imag(self) - return W_NDimArray(ret) + return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self @@ -298,7 +302,7 @@ new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) new_impl = self.implementation.reshape(space, self, new_shape) if new_impl is not None: - return W_NDimArray(new_impl) + return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: @@ -326,7 +330,7 @@ """ if self.is_scalar(): return self - return self.implementation.swapaxes(self, axis1, axis2) + return self.implementation.swapaxes(space, self, axis1, axis2) def descr_tolist(self, space): if len(self.get_shape()) == 0: @@ -446,17 +450,24 @@ # we must do that, because we need a working set. otherwise # we would modify the array in-place. Use this to our advantage # by converting nonnative byte order. + if self.is_scalar(): + return space.wrap(0) s = self.get_dtype().name if not self.get_dtype().native: s = s[1:] dtype = interp_dtype.get_dtype_cache(space).dtypes_by_name[s] contig = self.implementation.astype(space, dtype) - return contig.implementation.argsort(space, w_axis) + return contig.argsort(space, w_axis) def descr_astype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - return self.implementation.astype(space, dtype) + impl = self.implementation + if isinstance(impl, scalar.Scalar): + return W_NDimArray.new_scalar(space, dtype, impl.value) + else: + new_impl = impl.astype(space, dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -471,9 +482,9 @@ loop.byteswap(self.implementation, self.implementation) return self else: - res = W_NDimArray.from_shape(self.get_shape(), self.get_dtype()) - loop.byteswap(self.implementation, res.implementation) - return res + w_res = W_NDimArray.from_shape(space, self.get_shape(), self.get_dtype(), w_instance=self) + loop.byteswap(self.implementation, w_res.implementation) + return w_res @unwrap_spec(mode=str) def descr_choose(self, space, w_choices, w_out=None, mode='raise'): @@ -564,7 +575,7 @@ if space.is_none(w_out): if self.get_dtype().is_bool_type(): #numpy promotes bool.round() to float16. Go figure. 
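The w_instance argument threaded through the W_NDimArray.from_shape() calls in the hunks above is what lets results keep the caller's ndarray subtype, which is the behaviour the new test_subtype.py checks. A minimal sketch of the same rules in plain NumPy (MyArray is a made-up subclass used only for the illustration):

import numpy as np

class MyArray(np.ndarray):
    pass

a = np.arange(6, dtype=float).reshape(2, 3).view(MyArray)
assert isinstance(np.sqrt(a), MyArray)        # ufunc results keep the subtype
assert isinstance(a.sum(axis=0), MyArray)     # reductions without out= do too
assert not isinstance(np.concatenate([a, a]), MyArray)   # concatenate does not

The last assertion matches the comment in interp_arrayops.py above: concatenate always hands back a plain ndarray.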
- w_out = W_NDimArray.from_shape(self.get_shape(), + w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) else: w_out = None @@ -578,6 +589,8 @@ else: calc_dtype = out.get_dtype() + if decimals == 0: + out = out.descr_view(space,space.type(self)) loop.round(space, self, calc_dtype, self.get_shape(), decimals, out) return out @@ -619,9 +632,13 @@ "trace not implemented yet")) def descr_view(self, space, w_dtype=None, w_type=None) : - if w_type is not None: - raise OperationError(space.w_NotImplementedError, space.wrap( - "view(... type=) not implemented yet")) + if not w_type and w_dtype: + try: + if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + w_type = w_dtype + w_dtype = None + except (OperationError, TypeError): + pass if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -651,8 +668,9 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) new_shape[-1] = new_shape[-1] * old_itemsize / new_itemsize - return W_NDimArray(impl.get_view(self, dtype, new_shape)) - + v = impl.get_view(self, dtype, new_shape) + w_ret = wrap_impl(space, w_type, self, v) + return w_ret # --------------------- operations ---------------------------- @@ -760,9 +778,9 @@ return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) # Do the dims match? out_shape, other_critical_dim = match_dot_shapes(space, self, other) - result = W_NDimArray.from_shape(out_shape, dtype) + w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, result, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) @unwrap_spec(w_axis = WrappedDefault(None)) @@ -884,14 +902,20 @@ isfortran = space.getitem(w_state, space.wrap(3)) storage = space.getitem(w_state, space.wrap(4)) - self.implementation = W_NDimArray.from_shape_and_storage([space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), dtype, owning=True).implementation + self.implementation = W_NDimArray.from_shape_and_storage(space, + [space.int_w(i) for i in space.listview(shape)], + rffi.str2charp(space.str_w(storage), track_allocation=False), + dtype, owning=True).implementation + def descr___array_finalize__(self, space, w_obj): + pass - at unwrap_spec(offset=int) + at unwrap_spec(offset=int, order=str) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, - offset=0, w_strides=None, w_order=None): + offset=0, w_strides=None, order='C'): + from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.support import calc_strides if (offset != 0 or not space.is_none(w_strides) or - not space.is_none(w_order) or not space.is_none(w_buffer)): raise OperationError(space.w_NotImplementedError, space.wrap("unsupported param")) @@ -900,10 +924,19 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype) - return W_NDimArray.from_shape(shape, dtype) + if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): + return W_NDimArray.from_shape(space, shape, dtype, order) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = ConcreteArray(shape, dtype.base, order, strides, + backstrides) + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + 
space.call_function(space.getattr(w_ret, + space.wrap('__array_finalize__')), w_subtype) + return w_ret @unwrap_spec(addr=int) -def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype): +def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ Create an array from an existing buffer, given its address as int. PyPy-only implementation detail. @@ -912,9 +945,17 @@ from rpython.rlib.rawstorage import RAW_STORAGE_PTR storage = rffi.cast(RAW_STORAGE_PTR, addr) dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + space.call_function(space.gettypefor(interp_dtype.W_Dtype), + w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape_and_storage(shape, storage, dtype) + if w_subtype: + if not space.isinstance_w(w_subtype, space.w_type): + raise OperationError(space.w_ValueError, space.wrap( + "subtype must be a subtype of ndarray, not a class instance")) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + 'C', False, w_subtype) + else: + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype) W_NDimArray.typedef = TypeDef( "ndarray", @@ -1042,6 +1083,7 @@ W_NDimArray.fdel___pypy_data__), __reduce__ = interp2app(W_NDimArray.descr_reduce), __setstate__ = interp2app(W_NDimArray.descr_setstate), + __array_finalize__ = interp2app(W_NDimArray.descr___array_finalize__), ) @unwrap_spec(ndmin=int, copy=bool, subok=bool) @@ -1094,12 +1136,12 @@ dtype = interp_dtype.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape - arr = W_NDimArray.from_shape(shape, dtype, order=order) - arr_iter = arr.create_iter() + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() for w_elem in elems_w: arr_iter.setitem(dtype.coerce(space, w_elem)) arr_iter.next() - return arr + return w_arr @unwrap_spec(order=str) def zeros(space, w_shape, w_dtype=None, order='C'): @@ -1109,7 +1151,7 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - return space.wrap(W_NDimArray.from_shape(shape, dtype=dtype, order=order)) + return space.wrap(W_NDimArray.from_shape(space, shape, dtype=dtype, order=order)) @unwrap_spec(order=str) def ones(space, w_shape, w_dtype=None, order='C'): @@ -1119,10 +1161,10 @@ shape = _find_shape(space, w_shape, dtype) if not shape: return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) - arr = W_NDimArray.from_shape(shape, dtype=dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) one = dtype.box(1) - arr.fill(one) - return space.wrap(arr) + w_arr.fill(one) + return space.wrap(w_arr) def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -50,7 +50,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([num_items], dtype=dtype) + a = W_NDimArray.from_shape(space, [num_items], dtype=dtype) ai = a.create_iter() for val in items: ai.setitem(val) @@ -71,7 +71,7 @@ raise OperationError(space.w_ValueError, space.wrap( "string is smaller than requested size")) - a = W_NDimArray.from_shape([count], dtype=dtype) + a = 
W_NDimArray.from_shape(space, [count], dtype=dtype) loop.fromstring_loop(a, dtype, itemsize, s) return space.wrap(a) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -181,7 +181,8 @@ temp_shape = obj_shape[:axis] + obj_shape[axis + 1:] if out: dtype = out.get_dtype() - temp = W_NDimArray.from_shape(temp_shape, dtype) + temp = W_NDimArray.from_shape(space, temp_shape, dtype, + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -207,7 +208,7 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out, self.identity, cumultative, temp) if cumultative: @@ -216,7 +217,7 @@ raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape([obj.get_size()], dtype) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) loop.compute_reduce_cumultative(obj, out, dtype, self.func, self.identity) return out @@ -295,7 +296,7 @@ return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) - return loop.call1(shape, self.func, calc_dtype, res_dtype, + return loop.call1(space, shape, self.func, calc_dtype, res_dtype, w_obj, out) @@ -370,7 +371,7 @@ return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) - return loop.call2(new_shape, self.func, calc_dtype, + return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype, w_lhs, w_rhs, out) @@ -450,7 +451,7 @@ return dt2 return dt1 return dt2 - else: + else: # increase to the next signed type dtypenum = dt2.num + 1 newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] @@ -537,7 +538,13 @@ return current_guess if current_guess is complex_type: return complex_type - return interp_dtype.get_dtype_cache(space).w_float64dtype + if space.isinstance_w(w_obj, space.w_float): + return float_type + elif space.isinstance_w(w_obj, space.w_slice): + return long_dtype + raise operationerrfmt(space.w_NotImplementedError, + 'unable to create dtype from objects, ' '"%T" instance not supported', + w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -58,11 +58,11 @@ def __init__(self, name): self.name = name - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation ofs, subdtype = arr.dtype.fields[self.name] # strides backstrides are identical, ofs only changes start - return W_NDimArray.new_slice(arr.start + ofs, arr.get_strides(), + return W_NDimArray.new_slice(space, arr.start + ofs, arr.get_strides(), arr.get_backstrides(), arr.shape, arr, orig_arr, subdtype) @@ -81,13 +81,13 @@ assert s >= 0 return shape[:] + old_shape[s:] - def apply(self, orig_arr): + def apply(self, space, orig_arr): arr = orig_arr.implementation shape = self.extend_shape(arr.shape) r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), self.l) _, start, strides, backstrides = r - return W_NDimArray.new_slice(start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], 
backstrides[:], shape[:], arr, orig_arr) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -19,9 +19,34 @@ reds = ['shape', 'w_lhs', 'w_rhs', 'out', 'left_iter', 'right_iter', 'out_iter']) -def call2(shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): +def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out): + # handle array_priority + # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does: + # 1. if __array_priorities__ are equal and one is an ndarray and the + # other is a subtype, flip the order + # 2. elif rhs.__array_priority__ is higher, flip the order + # Now return the subtype of the first one + + w_ndarray = space.gettypefor(W_NDimArray) + lhs_type = space.type(w_lhs) + rhs_type = space.type(w_rhs) + lhs_for_subtype = w_lhs + rhs_for_subtype = w_rhs + #it may be something like a FlatIter, which is not an ndarray + if not space.is_true(space.issubtype(lhs_type, w_ndarray)): + lhs_type = space.type(w_lhs.base) + lhs_for_subtype = w_lhs.base + if not space.is_true(space.issubtype(rhs_type, w_ndarray)): + rhs_type = space.type(w_rhs.base) + rhs_for_subtype = w_rhs.base + if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray): + lhs_for_subtype = rhs_for_subtype + + # TODO handle __array_priorities__ and maybe flip the order + if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, + w_instance=lhs_for_subtype) left_iter = w_lhs.create_iter(shape) right_iter = w_rhs.create_iter(shape) out_iter = out.create_iter(shape) @@ -48,9 +73,9 @@ reds = ['shape', 'w_obj', 'out', 'obj_iter', 'out_iter']) -def call1(shape, func, calc_dtype, res_dtype, w_obj, out): +def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out): if out is None: - out = W_NDimArray.from_shape(shape, res_dtype) + out = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj) obj_iter = w_obj.create_iter(shape) out_iter = out.create_iter(shape) shapelen = len(shape) @@ -437,12 +462,12 @@ def tostring(space, arr): builder = StringBuilder() iter = arr.create_iter() - res_str = W_NDimArray.from_shape([1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') itemsize = arr.get_dtype().itemtype.get_element_size() res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), - res_str.implementation.get_storage_as_int(space)) + w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): - res_str.implementation.setitem(0, iter.getitem()) + w_res_str.implementation.setitem(0, iter.getitem()) for i in range(itemsize): builder.append(res_str_casted[i]) iter.next() diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -2,7 +2,7 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, - FunctionCall, FakeSpace) + FunctionCall, FakeSpace, W_NDimArray) class TestCompiler(object): @@ -84,6 +84,7 @@ assert interp.code.statements[0] == Assignment( 'a', Operator(Variable('b'), "+", FloatConstant(3))) + class TestRunner(object): def run(self, code): interp = numpy_compile(code) @@ -290,4 +291,32 @@ ''') assert interp.results[0].real == 0 assert interp.results[0].imag == 1 - + + def 
test_view_none(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = None + c = view(a, b) + c -> 0 + ''') + assert interp.results[0].value == 1 + + def test_view_ndarray(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = ndarray + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) + + def test_view_dtype(self): + interp = self.run(''' + a = [1, 0, 3, 0] + b = int + c = view(a, b) + c + ''') + results = interp.results[0] + assert isinstance(results, W_NDimArray) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -976,3 +976,16 @@ assert a[0] == 1 assert (a + a)[1] == 4 +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpypy import array + class Polynomial(object): + pass + try: + a = array(Polynomial()) + assert a.shape == () + except NotImplementedError, e: + if e.message.find('unable to create dtype from objects')>=0: + skip('creating ojbect dtype not supported yet') + + diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,8 +24,8 @@ def get_size(self): return 1 -def create_slice(a, chunks): - return Chunks(chunks).apply(W_NDimArray(a)).implementation +def create_slice(space, a, chunks): + return Chunks(chunks).apply(space, W_NDimArray(a)).implementation def create_array(*args, **kwargs): @@ -46,100 +46,100 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') assert a.strides == [15, 3, 1] assert a.backstrides == [135, 12, 2] - a = create_array([1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] assert s.backstrides == [40, 100] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 1 assert s.strides == [2, 10, 50] assert s.backstrides == [6, 40, 100] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.shape == [2, 1] assert s.strides == [3, 10] assert s.backstrides == [3, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 20 assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(3, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 
1] assert s.backstrides == [12, 2] - s = create_slice(a, [Chunk(1, 9, 2, 4)]) + s = create_slice(self.space, a, [Chunk(1, 9, 2, 4)]) assert s.start == 15 assert s.strides == [30, 3, 1] assert s.backstrides == [90, 12, 2] - s = create_slice(a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2), Chunk(1, 2, 1, 1), Chunk(1, 0, 0, 1)]) assert s.start == 19 assert s.shape == [2, 1] assert s.strides == [45, 3] assert s.backstrides == [45, 0] - s = create_slice(a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(0, 10, 1, 10), Chunk(2, 0, 0, 1)]) assert s.start == 6 assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [50] assert s2.parent is a assert s2.backstrides == [100] assert s2.start == 35 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [3, 50] assert s2.backstrides == [3, 100] assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(5, 0, 0, 1)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 - s2 = create_slice(s, [Chunk(3, 0, 0, 1)]) + s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) assert s2.shape == [3] assert s2.strides == [1] assert s2.parent is a assert s2.backstrides == [2] assert s2.start == 5 * 15 + 3 * 3 - s = create_slice(a, [Chunk(1, 5, 3, 2)]) - s2 = create_slice(s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) + s = create_slice(self.space, a, [Chunk(1, 5, 3, 2)]) + s2 = create_slice(self.space, s, [Chunk(0, 2, 1, 2), Chunk(2, 0, 0, 1)]) assert s2.shape == [2, 3] assert s2.strides == [45, 1] assert s2.backstrides == [45, 2] assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array([10, 5, 3], MockDtype(), order='F') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array([10, 5, 3], MockDtype(), order='C') - s = create_slice(a, [Chunk(9, -1, -2, 5)]) + a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] assert s.backstrides == [-120, 12, 2] @@ -207,7 +207,8 @@ raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) # dtypes = get_dtype_cache(self.space) - w_array = W_NDimArray.from_shape_and_storage([2, 2], storage, dtypes.w_int8dtype) + w_array = W_NDimArray.from_shape_and_storage(self.space, [2, 2], + storage, dtypes.w_int8dtype) def get(i, j): return w_array.getitem(self.space, [i, j]).value assert get(0, 0) == 0 @@ -1442,7 +1443,7 @@ assert x.view('int8').shape == (10, 3) def 
test_ndarray_view_empty(self): - from numpypy import array, int8, int16, dtype + from numpypy import array, int8, int16 x = array([], dtype=[('a', int8), ('b', int8)]) y = x.view(dtype=int16) @@ -2876,6 +2877,12 @@ assert y[0, 1] == 2 y[0, 1] = 42 assert x[1] == 42 + class C(ndarray): + pass + z = ndarray._from_shape_and_storage([4, 1], addr, x.dtype, C) + assert isinstance(z, C) + assert z.shape == (4, 1) + assert z[1, 0] == 42 def test___pypy_data__(self): from numpypy import array @@ -2890,7 +2897,7 @@ class AppTestLongDoubleDtypes(BaseNumpyAppTest): def setup_class(cls): from pypy.module.micronumpy import Module - print dir(Module.interpleveldefs) + #print dir(Module.interpleveldefs) if not Module.interpleveldefs.get('longfloat', None): py.test.skip('no longdouble types yet') BaseNumpyAppTest.setup_class.im_func(cls) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -0,0 +1,223 @@ +import py +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestSupport(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_NoNew = cls.space.appexec([], '''(): + from numpypy import ndarray + class NoNew(ndarray): + def __new__(cls, subtype): + raise ValueError('should not call __new__') + def __array_finalize__(self, obj): + + self.called_finalize = True + return NoNew ''') + cls.w_SubType = cls.space.appexec([], '''(): + from numpypy import ndarray, asarray + class SubType(ndarray): + def __new__(obj, input_array): + obj = asarray(input_array).view(obj) + obj.called_new = True + return obj + def __array_finalize__(self, obj): + self.called_finalize = True + return SubType ''') + + def test_subtype_base(self): + from numpypy import ndarray, dtype + class C(ndarray): + def __new__(subtype, shape, dtype): + self = ndarray.__new__(subtype, shape, dtype) + self.id = 'subtype' + return self + a = C([2, 2], int) + assert isinstance(a, C) + assert isinstance(a, ndarray) + assert a.shape == (2, 2) + assert a.dtype is dtype(int) + assert a.id == 'subtype' + a = a.reshape(1, 4) + b = a.reshape(4, 1) + assert isinstance(b, C) + #make sure __new__ was not called + assert not getattr(b, 'id', None) + a.fill(3) + b = a[0] + assert isinstance(b, C) + assert (b == 3).all() + b[0]=100 + assert a[0,0] == 100 + + def test_subtype_view(self): + from numpypy import ndarray, array + class matrix(ndarray): + def __new__(subtype, data, dtype=None, copy=True): + if isinstance(data, matrix): + return data + return data.view(subtype) + a = array(range(5)) + b = matrix(a) + assert isinstance(b, matrix) + assert (b == a).all() + + + def test_finalize(self): + #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray + import numpypy as np + class InfoArray(np.ndarray): + def __new__(subtype, shape, dtype=float, buffer=None, offset=0, + strides=None, order='C', info=None): + obj = np.ndarray.__new__(subtype, shape, dtype, buffer, + offset, strides, order) + obj.info = info + return obj + + def __array_finalize__(self, obj): + if obj is None: + print 'finalize with None' + return + # printing the object itself will crash the test + print 'finalize with something',type(obj) + self.info = getattr(obj, 'info', None) + obj = InfoArray(shape=(3,)) + assert isinstance(obj, InfoArray) + assert obj.info is None + obj = InfoArray(shape=(3,), info='information') + 
assert obj.info == 'information' + v = obj[1:] + assert isinstance(v, InfoArray) + assert v.base is obj + assert v.info == 'information' + arr = np.arange(10) + cast_arr = arr.view(InfoArray) + assert isinstance(cast_arr, InfoArray) + assert cast_arr.base is arr + assert cast_arr.info is None + + def test_sub_where(self): + from numpypy import where, ones, zeros, array + a = array([1, 2, 3, 0, -3]) + v = a.view(self.NoNew) + b = where(array(v) > 0, ones(5), zeros(5)) + assert (b == [1, 1, 1, 0, 0]).all() + # where returns an ndarray irregardless of the subtype of v + assert not isinstance(b, self.NoNew) + + def test_sub_repeat(self): + from numpypy import repeat, array + a = self.SubType(array([[1, 2], [3, 4]])) + b = repeat(a, 3) + assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() + assert isinstance(b, self.SubType) + + def test_sub_flatiter(self): + from numpypy import array + a = array(range(9)).reshape(3, 3).view(self.NoNew) + c = array(range(9)).reshape(3, 3) + assert isinstance(a.flat[:] + a.flat[:], self.NoNew) + assert isinstance(a.flat[:] + c.flat[:], self.NoNew) + assert isinstance(c.flat[:] + a.flat[:], self.NoNew) + assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) + + def test_sub_getitem_filter(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + c = b[array([False, True, False, True, False])] + assert c.shape == (2,) + assert (c == [1, 3]).all() + assert isinstance(c, self.SubType) + assert b.called_new + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_getitem_array_int(self): + from numpypy import array + a = array(range(5)) + b = self.SubType(a) + assert b.called_new + c = b[array([3, 2, 1, 4])] + assert (c == [3, 2, 1, 4]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + + def test_sub_round(self): + from numpypy import array + a = array(range(10), dtype=float).view(self.NoNew) + # numpy compatibility + b = a.round(decimals=0) + assert isinstance(b, self.NoNew) + b = a.round(decimals=1) + assert not isinstance(b, self.NoNew) + b = a.round(decimals=-1) + assert not isinstance(b, self.NoNew) + + def test_sub_dot(self): + # the returned type is that of the first argument + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = array(range(12)).reshape(4,3).view(self.SubType) + d = c.dot(a) + assert isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert d.called_finalize + d = a.dot(c) + assert not isinstance(d, self.SubType) + assert not getattr(d, 'called_new', False) + assert not getattr(d, 'called_finalize', False) + + def test_sub_reduce(self): + # i.e. sum, max + # test for out as well + from numpypy import array + a = array(range(12)).reshape(3,4) + b = self.SubType(a) + c = b.sum(axis=0) + assert (c == [12, 15, 18, 21]).all() + assert isinstance(c, self.SubType) + assert not getattr(c, 'called_new', False) + assert c.called_finalize + d = array(range(4)) + c = b.sum(axis=0, out=d) + assert c is d + assert not isinstance(c, self.SubType) + d = array(range(4)).view(self.NoNew) + c = b.sum(axis=0, out=d) + assert c is d + assert isinstance(c, self.NoNew) + + def test_sub_call2(self): + # c + a vs. a + c, what about array priority? 
+ from numpypy import array + a = array(range(12)).view(self.NoNew) + b = self.SubType(range(12)) + c = b + a + assert isinstance(c, self.SubType) + c = a + b + assert isinstance(c, self.NoNew) + d = range(12) + e = a - d + assert isinstance(e, self.NoNew) + + def test_sub_call1(self): + from numpypy import array, sqrt + a = array(range(12)).view(self.NoNew) + b = sqrt(a) + assert b.called_finalize == True + + def test_sub_astype(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.astype(float) + assert b.called_finalize == True + + def test_sub_reshape(self): + from numpypy import array + a = array(range(12)).view(self.NoNew) + b = a.reshape(3, 4) + assert b.called_finalize == True + diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -13,21 +13,21 @@ applevel_name = os.name appleveldefs = { - 'error' : 'app_posix.error', - 'stat_result': 'app_posix.stat_result', - 'statvfs_result': 'app_posix.statvfs_result', - 'fdopen' : 'app_posix.fdopen', - 'tmpfile' : 'app_posix.tmpfile', - 'popen' : 'app_posix.popen', - 'tmpnam' : 'app_posix.tmpnam', - 'tempnam' : 'app_posix.tempnam', + 'error': 'app_posix.error', + 'stat_result': 'app_posix.stat_result', + 'statvfs_result': 'app_posix.statvfs_result', + 'fdopen': 'app_posix.fdopen', + 'tmpfile': 'app_posix.tmpfile', + 'popen': 'app_posix.popen', + 'tmpnam': 'app_posix.tmpnam', + 'tempnam': 'app_posix.tempnam', } if os.name == 'nt': appleveldefs.update({ - 'popen2' : 'app_posix.popen2', - 'popen3' : 'app_posix.popen3', - 'popen4' : 'app_posix.popen4', - }) + 'popen2': 'app_posix.popen2', + 'popen3': 'app_posix.popen3', + 'popen4': 'app_posix.popen4', + }) if hasattr(os, 'wait'): appleveldefs['wait'] = 'app_posix.wait' @@ -37,42 +37,44 @@ appleveldefs['wait4'] = 'app_posix.wait4' interpleveldefs = { - 'open' : 'interp_posix.open', - 'lseek' : 'interp_posix.lseek', - 'write' : 'interp_posix.write', - 'isatty' : 'interp_posix.isatty', - 'read' : 'interp_posix.read', - 'close' : 'interp_posix.close', - 'closerange': 'interp_posix.closerange', - 'fstat' : 'interp_posix.fstat', - 'stat' : 'interp_posix.stat', - 'lstat' : 'interp_posix.lstat', - 'stat_float_times' : 'interp_posix.stat_float_times', - 'dup' : 'interp_posix.dup', - 'dup2' : 'interp_posix.dup2', - 'access' : 'interp_posix.access', - 'times' : 'interp_posix.times', - 'system' : 'interp_posix.system', - 'unlink' : 'interp_posix.unlink', - 'remove' : 'interp_posix.remove', - 'getcwd' : 'interp_posix.getcwd', - 'getcwdu' : 'interp_posix.getcwdu', - 'chdir' : 'interp_posix.chdir', - 'mkdir' : 'interp_posix.mkdir', - 'rmdir' : 'interp_posix.rmdir', - 'environ' : 'interp_posix.get(space).w_environ', - 'listdir' : 'interp_posix.listdir', - 'strerror' : 'interp_posix.strerror', - 'pipe' : 'interp_posix.pipe', - 'chmod' : 'interp_posix.chmod', - 'rename' : 'interp_posix.rename', - 'umask' : 'interp_posix.umask', - '_exit' : 'interp_posix._exit', - 'utime' : 'interp_posix.utime', - '_statfields': 'interp_posix.getstatfields(space)', - 'kill' : 'interp_posix.kill', - 'abort' : 'interp_posix.abort', - 'urandom' : 'interp_posix.urandom', + 'open': 'interp_posix.open', + 'lseek': 'interp_posix.lseek', + 'write': 'interp_posix.write', + 'isatty': 'interp_posix.isatty', + 'read': 'interp_posix.read', + 'close': 'interp_posix.close', + 'closerange': 'interp_posix.closerange', + + 'fstat': 'interp_posix.fstat', + 'stat': 'interp_posix.stat', + 'lstat': 'interp_posix.lstat', + 
'stat_float_times': 'interp_posix.stat_float_times', + + 'dup': 'interp_posix.dup', + 'dup2': 'interp_posix.dup2', + 'access': 'interp_posix.access', + 'times': 'interp_posix.times', + 'system': 'interp_posix.system', + 'unlink': 'interp_posix.unlink', + 'remove': 'interp_posix.remove', + 'getcwd': 'interp_posix.getcwd', + 'getcwdu': 'interp_posix.getcwdu', + 'chdir': 'interp_posix.chdir', + 'mkdir': 'interp_posix.mkdir', + 'rmdir': 'interp_posix.rmdir', + 'environ': 'interp_posix.get(space).w_environ', + 'listdir': 'interp_posix.listdir', + 'strerror': 'interp_posix.strerror', + 'pipe': 'interp_posix.pipe', + 'chmod': 'interp_posix.chmod', + 'rename': 'interp_posix.rename', + 'umask': 'interp_posix.umask', + '_exit': 'interp_posix._exit', + 'utime': 'interp_posix.utime', From noreply at buildbot.pypy.org Sun Jul 28 11:10:37 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 11:10:37 +0200 (CEST) Subject: [pypy-commit] pypy kill-ootype: close to be merged branch Message-ID: <20130728091037.14B541C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: kill-ootype Changeset: r65728:4df63ba364d5 Date: 2013-07-28 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/4df63ba364d5/ Log: close to be merged branch From noreply at buildbot.pypy.org Sun Jul 28 11:10:41 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 11:10:41 +0200 (CEST) Subject: [pypy-commit] pypy default: (ronan) merge kill-ootype branch. This kills ootype support from the source, Message-ID: <20130728091041.9A42B1C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65729:f6fe5e720007 Date: 2013-07-28 11:09 +0200 http://bitbucket.org/pypy/pypy/changeset/f6fe5e720007/ Log: (ronan) merge kill-ootype branch. This kills ootype support from the source, which is not really used by anyone and in fact quite annoying to maintain. 
Kills about 35k LOC diff too long, truncating to 2000 out of 43686 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -234,9 +234,6 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - ## if config.translation.type_system == 'ootype': - ## config.objspace.usemodules.suggest(rbench=True) - config.translation.suggest(check_str_without_nul=True) if config.translation.thread: @@ -271,12 +268,6 @@ elif config.objspace.usemodules.pypyjit: config.translation.jit = True - if config.translation.backend == "cli": - config.objspace.usemodules.clr = True - # XXX did it ever work? - #elif config.objspace.usemodules.clr: - # config.translation.backend == "cli" - if config.translation.sandbox: config.objspace.lonepycfiles = False config.objspace.usepycfiles = False @@ -292,16 +283,6 @@ wrapstr = 'space.wrap(%r)' % (options) pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr - if config.translation.backend in ["cli", "jvm"] and sys.platform == "win32": - # HACK: The ftruncate implementation in streamio.py which is used for the Win32 platform - # is specific for the C backend and can't be generated on CLI or JVM. Because of that, - # we have to patch it out. - from rpython.rlib import streamio - def ftruncate_win32_dummy(fd, size): pass - def _setfd_binary_dummy(fd): pass - streamio.ftruncate_win32 = ftruncate_win32_dummy - streamio._setfd_binary = _setfd_binary_dummy - return self.get_entry_point(config) def jitpolicy(self, driver): diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -18,10 +18,10 @@ The builtin Unicode codecs use the following interface: - _encode(Unicode_object[,errors='strict']) -> + _encode(Unicode_object[,errors='strict']) -> (string object, bytes consumed) - _decode(char_buffer_obj[,errors='strict']) -> + _decode(char_buffer_obj[,errors='strict']) -> (Unicode object, bytes consumed) _encode() interfaces also accept non-Unicode object as @@ -90,8 +90,7 @@ "NOT_RPYTHON" # mbcs codec is Windows specific, and based on rffi. 
- if (hasattr(runicode, 'str_decode_mbcs') and - space.config.translation.type_system != 'ootype'): + if (hasattr(runicode, 'str_decode_mbcs')): self.interpleveldefs['mbcs_encode'] = 'interp_codecs.mbcs_encode' self.interpleveldefs['mbcs_decode'] = 'interp_codecs.mbcs_decode' diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -12,18 +12,6 @@ "set_file_encoding": "interp_file.set_file_encoding", } - def __init__(self, space, *args): - "NOT_RPYTHON" - - # on windows with oo backends, remove file.truncate, - # because the implementation is based on rffi - if (sys.platform == 'win32' and - space.config.translation.type_system == 'ootype'): - from pypy.module._file.interp_file import W_File - del W_File.typedef.rawdict['truncate'] - - MixedModule.__init__(self, space, *args) - def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. from pypy.module._file.interp_file import getopenstreams, StreamErrors diff --git a/pypy/module/clr/__init__.py b/pypy/module/clr/__init__.py deleted file mode 100644 --- a/pypy/module/clr/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Package initialisation -from pypy.interpreter.mixedmodule import MixedModule - -import boxing_rules # with side effects - -class Module(MixedModule): - """CLR module""" - - appleveldefs = { - 'dotnetimporter': 'app_importer.importer' - } - - interpleveldefs = { - '_CliObject_internal': 'interp_clr.W_CliObject', - 'call_staticmethod': 'interp_clr.call_staticmethod', - 'load_cli_class': 'interp_clr.load_cli_class', - 'get_assemblies_info': 'interp_clr.get_assemblies_info', - 'AddReferenceByPartialName': 'interp_clr.AddReferenceByPartialName', - } - - def startup(self, space): - self.space.appexec([self], """(clr_module): - import sys - clr_module.get_assemblies_info() # load info for std assemblies - sys.meta_path.append(clr_module.dotnetimporter()) - """) diff --git a/pypy/module/clr/app_clr.py b/pypy/module/clr/app_clr.py deleted file mode 100644 --- a/pypy/module/clr/app_clr.py +++ /dev/null @@ -1,204 +0,0 @@ -# NOT_RPYTHON - -class StaticMethodWrapper(object): - __slots__ = ('class_name', 'meth_name',) - - def __init__(self, class_name, meth_name): - self.class_name = class_name - self.meth_name = meth_name - - def __call__(self, *args): - import clr - return clr.call_staticmethod(self.class_name, self.meth_name, args) - - def __repr__(self): - return '' % (self.class_name, self.meth_name) - - -class MethodWrapper(object): - __slots__ = ('meth_name',) - - def __init__(self, meth_name): - self.meth_name = meth_name - - def __get__(self, obj, type_): - if obj is None: - return UnboundMethod(type_, self.meth_name) - else: - return BoundMethod(self.meth_name, obj) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, repr(self.meth_name)) - - -class UnboundMethod(object): - __slots__ = ('im_class', 'im_name') - - def __init__(self, im_class, im_name): - self.im_class = im_class - self.im_name = im_name - - def __raise_TypeError(self, thing): - raise TypeError, 'unbound method %s() must be called with %s ' \ - 'instance as first argument (got %s instead)' % \ - (self.im_name, self.im_class.__cliclass__, thing) - - def __call__(self, *args): - if len(args) == 0: - self.__raise_TypeError('nothing') - im_self = args[0] - if not isinstance(im_self, self.im_class): - self.__raise_TypeError('%s instance' % im_self.__class__.__name__) - return im_self.__cliobj__.call_method(self.im_name, args, 1) # ignore 
the first arg - - def __repr__(self): - return '' % (self.im_class.__cliclass__, self.im_name) - - -class BoundMethod(object): - __slots__ = ('im_name', 'im_self') - - def __init__(self, im_name, im_self): - self.im_name = im_name - self.im_self = im_self - - def __call__(self, *args): - return self.im_self.__cliobj__.call_method(self.im_name, args) - - def __repr__(self): - return '' % (self.im_self.__class__.__cliclass__, - self.im_name, - self.im_self) - -class StaticProperty(object): - def __init__(self, fget=None, fset=None): - self.fget = fget - self.fset = fset - - def __get__(self, obj, type_): - return self.fget() - -def _qualify(t): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - return '%s, %s' % (t, mscorlib) - -class MetaGenericCliClassWrapper(type): - _cli_types = { - int: _qualify('System.Int32'), - str: _qualify('System.String'), - bool: _qualify('System.Boolean'), - float: _qualify('System.Double'), - } - _System_Object = _qualify('System.Object') - - def _cli_name(cls, ttype): - if isinstance(ttype, MetaCliClassWrapper): - return '[%s]' % ttype.__fullyqualifiedname__ - else: - return '[%s]' % cls._cli_types.get(ttype, cls._System_Object) - - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - - def __getitem__(cls, type_or_tuple): - import clr - if isinstance(type_or_tuple, tuple): - types = type_or_tuple - else: - types = (type_or_tuple,) - namespace, generic_class = cls.__cliclass__.rsplit('.', 1) - generic_params = [cls._cli_name(t) for t in types] - instance_class = '%s[%s]' % (generic_class, ','.join(generic_params)) - try: - return clr.load_cli_class(cls.__assemblyname__, namespace, instance_class) - except ImportError: - raise TypeError, "Cannot load type %s.%s" % (namespace, instance_class) - -class MetaCliClassWrapper(type): - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - -class CliClassWrapper(object): - __slots__ = ('__cliobj__',) - - def __init__(self, *args): - import clr - self.__cliobj__ = clr._CliObject_internal(self.__fullyqualifiedname__, args) - - -class IEnumeratorWrapper(object): - def __init__(self, enumerator): - self.__enumerator__ = enumerator - - def __iter__(self): - return self - - def next(self): - if not self.__enumerator__.MoveNext(): - raise StopIteration - return self.__enumerator__.Current - -# this method need to be attached only to classes that implements IEnumerable (see build_wrapper) -def __iter__(self): - return IEnumeratorWrapper(self.GetEnumerator()) - -def wrapper_from_cliobj(cls, cliobj): - obj = cls.__new__(cls) - obj.__cliobj__ = cliobj - return obj - -def build_wrapper(namespace, classname, assemblyname, - staticmethods, methods, properties, indexers, - hasIEnumerable, isClassGeneric): - fullname = '%s.%s' % (namespace, classname) - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - d = {'__cliclass__': fullname, - '__fullyqualifiedname__': assembly_qualified_name, - '__assemblyname__': assemblyname, - '__module__': namespace} - for name in staticmethods: - d[name] = StaticMethodWrapper(assembly_qualified_name, name) - for name in methods: - d[name] = MethodWrapper(name) - - # check if IEnumerable is implemented - if hasIEnumerable: - d['__iter__'] = __iter__ - - assert len(indexers) <= 1 - if indexers: - name, getter, 
setter, is_static = indexers[0] - assert not is_static - if getter: - d['__getitem__'] = d[getter] - if setter: - d['__setitem__'] = d[setter] - if isClassGeneric: - cls = MetaGenericCliClassWrapper(classname, (CliClassWrapper,), d) - else: - cls = MetaCliClassWrapper(classname, (CliClassWrapper,), d) - - # we must add properties *after* the class has been created - # because we need to store UnboundMethods as getters and setters - for (name, getter, setter, is_static) in properties: - fget = None - fset = None - if getter: - fget = getattr(cls, getter) - if setter: - fset = getattr(cls, setter) - if is_static: - prop = StaticProperty(fget, fset) - else: - prop = property(fget, fset) - setattr(cls, name, prop) - - return cls diff --git a/pypy/module/clr/app_importer.py b/pypy/module/clr/app_importer.py deleted file mode 100644 --- a/pypy/module/clr/app_importer.py +++ /dev/null @@ -1,85 +0,0 @@ -"""NOT_RPYTHON""" - -# Meta hooks are called at the start of Import Processing -# Meta hooks can override the sys.path, frozen modules , built-in modules -# To register a Meta Hook simply add importer object to sys.meta_path - -import sys -import types - -class importer(object): - ''' - If the importer is installed on sys.meta_path, it will - receive a second argument, which is None for a top-level module, or - package.__path__ for submodules or subpackages - - It should return a loader object if the module was found, or None if it wasn\'t. - If find_module() raises an exception, the caller will abort the import. - When importer.find_module("spam.eggs.ham") is called, "spam.eggs" has already - been imported and added to sys.modules. - ''' - - def find_module(self, fullname, path=None): - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in namespaces or fullname in classes: - return self # fullname is a .NET Module - else: - return None # fullname is not a .NET Module - - def load_module(self, fullname): - ''' - The load_module() must fulfill the following *before* it runs any code: - Note that the module object *must* be in sys.modules before the - loader executes the module code. - - A If 'fullname' exists in sys.modules, the loader must use that - else the loader must create a new module object and add it to sys.modules. - - module = sys.modules.setdefault(fullname, new.module(fullname)) - - B The __file__ attribute must be set. String say "" - - C The __name__ attribute must be set. If one uses - imp.new_module() then the attribute is set automatically. - - D If it\'s a package, the __path__ variable must be set. This must - be a list, but may be empty if __path__ has no further - significance to the importer (more on this later). - - E It should add a __loader__ attribute to the module, set to the loader object. 
- - ''' - # If it is a call for a Class then return with the Class reference - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in classes: - assemblyname = classes[fullname] - fullname = generics.get(fullname, fullname) - ns, classname = fullname.rsplit('.', 1) - sys.modules[fullname] = clr.load_cli_class(assemblyname, ns, classname) - else: # if not a call for actual class (say for namespaces) assign an empty module - if fullname not in sys.modules: - mod = CLRModule(fullname) - mod.__file__ = "<%s>" % self.__class__.__name__ - mod.__loader__ = self - mod.__name__ = fullname - # add it to the modules dict - sys.modules[fullname] = mod - - # if it is a PACKAGE then we are to initialize the __path__ for the module - # we won't deal with Packages here - return sys.modules[fullname] - -class CLRModule(types.ModuleType): - def __getattr__(self, name): - if not name.startswith("__"): - try: - iname = self.__name__ + '.' + name - __import__(iname) - except ImportError: - pass - return types.ModuleType.__getattribute__(self, name) - diff --git a/pypy/module/clr/assemblyname.py b/pypy/module/clr/assemblyname.py deleted file mode 100644 --- a/pypy/module/clr/assemblyname.py +++ /dev/null @@ -1,2 +0,0 @@ -mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' -System = 'System, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py deleted file mode 100644 --- a/pypy/module/clr/boxing_rules.py +++ /dev/null @@ -1,53 +0,0 @@ -from rpython.tool.pairtype import extendabletype -from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.boolobject import W_BoolObject -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.stringobject import W_StringObject -from rpython.translator.cli.dotnet import box - -class __extend__(W_Root): - __metaclass__ = extendabletype - - def tocli(self): - return box(self) - -class __extend__(W_IntObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.intval) - -class __extend__(W_FloatObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.floatval) - -class __extend__(W_NoneObject): - __metaclass__ = extendabletype - - def tocli(self): - return None - -class __extend__(W_BoolObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.boolval) - -class __extend__(W_StringObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self._value) - -##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -##from rpython.rlib.nonconst import NonConstant - -##class __extend__(W_Object_Fake): -## __metaclass__ = extendabletype - -## def tocli(self): -## return NonConstant(None) diff --git a/pypy/module/clr/interp_clr.py b/pypy/module/clr/interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/interp_clr.py +++ /dev/null @@ -1,364 +0,0 @@ -import os.path -from pypy.module.clr import assemblyname -from pypy.interpreter.baseobjspace import W_Root, W_Root -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, ApplevelClass -from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.dotnet import CLR, box, unbox, NativeException, 
native_exc,\ - new_array, init_array, typeof - -System = CLR.System -Assembly = CLR.System.Reflection.Assembly -TargetInvocationException = NativeException(CLR.System.Reflection.TargetInvocationException) -AmbiguousMatchException = NativeException(CLR.System.Reflection.AmbiguousMatchException) - -def get_method(space, b_type, name, b_paramtypes): - try: - method = b_type.GetMethod(name, b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - if method is None: - msg = 'No overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - return method - -def get_constructor(space, b_type, b_paramtypes): - try: - ctor = b_type.GetConstructor(b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple constructors could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - if ctor is None: - msg = 'No overloads for constructor could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - return ctor - -def rewrap_args(space, w_args, startfrom): - args = space.unpackiterable(w_args) - paramlen = len(args)-startfrom - b_args = new_array(System.Object, paramlen) - b_paramtypes = new_array(System.Type, paramlen) - for i in range(startfrom, len(args)): - j = i-startfrom - b_obj = py2cli(space, args[i]) - b_args[j] = b_obj - if b_obj is None: - b_paramtypes[j] = typeof(System.Object) # we really can't be more precise - else: - b_paramtypes[j] = b_obj.GetType() # XXX: potentially inefficient - return b_args, b_paramtypes - - -def call_method(space, b_obj, b_type, name, w_args, startfrom): - b_args, b_paramtypes = rewrap_args(space, w_args, startfrom) - b_meth = get_method(space, b_type, name, b_paramtypes) - try: - # for an explanation of the box() call, see the log message for revision 35167 - b_res = box(b_meth.Invoke(b_obj, b_args)) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - if b_meth.get_ReturnType().get_Name() == 'Void': - return space.w_None - else: - return cli2py(space, b_res) - - at unwrap_spec(typename=str, methname=str) -def call_staticmethod(space, typename, methname, w_args): - """ - Call a .NET static method. - - Parameters: - - - typename: the fully qualified .NET name of the class - containing the method (e.g. ``System.Math``) - - - methname: the name of the static method to call (e.g. ``Abs``) - - - args: a list containing the arguments to be passed to the - method. - """ - b_type = System.Type.GetType(typename) # XXX: cache this! - return call_method(space, None, b_type, methname, w_args, 0) - -def py2cli(space, w_obj): - try: - cliobj = space.getattr(w_obj, space.wrap('__cliobj__')) - except OperationError, e: - if e.match(space, space.w_AttributeError): - # it hasn't got a __cloobj__ - return w_obj.tocli() - else: - raise - else: - if isinstance(cliobj, W_CliObject): - return cliobj.b_obj # unwrap it! - else: - # this shouldn't happen! Fallback to the default impl - return w_obj.tocli() - -def cli2py(space, b_obj): - # TODO: support other types and find the most efficient way to - # select the correct case - if b_obj is None: - return space.w_None - - w_obj = unbox(b_obj, W_Root) - if w_obj is not None: - return w_obj # it's already a wrapped object! 
- - b_type = b_obj.GetType() - if b_type == typeof(System.Int32): - intval = unbox(b_obj, ootype.Signed) - return space.wrap(intval) - elif b_type == typeof(System.Double): - floatval = unbox(b_obj, ootype.Float) - return space.wrap(floatval) - elif b_type == typeof(System.Boolean): - boolval = unbox(b_obj, ootype.Bool) - return space.wrap(boolval) - elif b_type == typeof(System.String): - strval = unbox(b_obj, ootype.String) - return space.wrap(strval) - else: - namespace, classname = split_fullname(b_type.ToString()) - assemblyname = b_type.get_Assembly().get_FullName() - w_cls = load_cli_class(space, assemblyname, namespace, classname) - cliobj = W_CliObject(space, b_obj) - return wrapper_from_cliobj(space, w_cls, cliobj) - -def split_fullname(name): - lastdot = name.rfind('.') - if lastdot < 0: - return '', name - return name[:lastdot], name[lastdot+1:] - -def wrap_list_of_tuples(space, lst): - list_w = [] - for (a,b,c,d) in lst: - items_w = [space.wrap(a), space.wrap(b), space.wrap(c), space.wrap(d)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_pairs(space, lst): - list_w = [] - for (a,b) in lst: - items_w = [space.wrap(a), space.wrap(b)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_strings(space, lst): - list_w = [space.wrap(s) for s in lst] - return space.newlist(list_w) - -def get_methods(space, b_type): - methods = [] - staticmethods = [] - b_methodinfos = b_type.GetMethods() - for i in range(len(b_methodinfos)): - b_meth = b_methodinfos[i] - if b_meth.get_IsPublic(): - if b_meth.get_IsStatic(): - staticmethods.append(str(b_meth.get_Name())) - else: - methods.append(str(b_meth.get_Name())) - w_staticmethods = wrap_list_of_strings(space, staticmethods) - w_methods = wrap_list_of_strings(space, methods) - return w_staticmethods, w_methods - -def get_properties(space, b_type): - properties = [] - indexers = {} - b_propertyinfos = b_type.GetProperties() - for i in range(len(b_propertyinfos)): - b_prop = b_propertyinfos[i] - get_name = None - set_name = None - is_static = False - if b_prop.get_CanRead(): - get_meth = b_prop.GetGetMethod() - get_name = get_meth.get_Name() - is_static = get_meth.get_IsStatic() - if b_prop.get_CanWrite(): - set_meth = b_prop.GetSetMethod() - if set_meth: - set_name = set_meth.get_Name() - is_static = set_meth.get_IsStatic() - b_indexparams = b_prop.GetIndexParameters() - if len(b_indexparams) == 0: - properties.append((b_prop.get_Name(), get_name, set_name, is_static)) - else: - indexers[b_prop.get_Name(), get_name, set_name, is_static] = None - w_properties = wrap_list_of_tuples(space, properties) - w_indexers = wrap_list_of_tuples(space, indexers.keys()) - return w_properties, w_indexers - -class _CliClassCache: - def __init__(self): - self.cache = {} - - def put(self, fullname, cls): - assert fullname not in self.cache - self.cache[fullname] = cls - - def get(self, fullname): - return self.cache.get(fullname, None) -CliClassCache = _CliClassCache() - -class _AssembliesInfo: - w_namespaces = None - w_classes = None - w_generics = None - w_info = None # a tuple containing (w_namespaces, w_classes, w_generics) -AssembliesInfo = _AssembliesInfo() - -def save_info_for_assembly(space, b_assembly): - info = AssembliesInfo - b_types = b_assembly.GetTypes() - w_assemblyName = space.wrap(b_assembly.get_FullName()) - for i in range(len(b_types)): - b_type = b_types[i] - namespace = b_type.get_Namespace() - fullname = b_type.get_FullName() - if '+' in fullname: - # it's 
an internal type, skip it - continue - if namespace is not None: - # builds all possible sub-namespaces - # (e.g. 'System', 'System.Windows', 'System.Windows.Forms') - chunks = namespace.split(".") - temp_name = chunks[0] - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - for chunk in chunks[1:]: - temp_name += "."+chunk - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - if b_type.get_IsGenericType(): - index = fullname.rfind("`") - assert index >= 0 - pyName = fullname[0:index] - space.setitem(info.w_classes, space.wrap(pyName), w_assemblyName) - space.setitem(info.w_generics, space.wrap(pyName), space.wrap(fullname)) - else: - space.setitem(info.w_classes, space.wrap(fullname), w_assemblyName) - - -def save_info_for_std_assemblies(space): - # in theory we should use Assembly.Load, but it doesn't work with - # pythonnet because it thinks it should use the Load(byte[]) overload - b_mscorlib = Assembly.LoadWithPartialName(assemblyname.mscorlib) - b_System = Assembly.LoadWithPartialName(assemblyname.System) - save_info_for_assembly(space, b_mscorlib) - save_info_for_assembly(space, b_System) - -def get_assemblies_info(space): - info = AssembliesInfo - if info.w_info is None: - info.w_namespaces = space.newdict() - info.w_classes = space.newdict() - info.w_generics = space.newdict() - info.w_info = space.newtuple([info.w_namespaces, info.w_classes, info.w_generics]) - save_info_for_std_assemblies(space) - return info.w_info - -#_______________________________________________________________________________ -# AddReference* methods - -# AddReference', 'AddReferenceByName', 'AddReferenceByPartialName', 'AddReferenceToFile', 'AddReferenceToFileAndPath' - - at unwrap_spec(name=str) -def AddReferenceByPartialName(space, name): - b_assembly = Assembly.LoadWithPartialName(name) - if b_assembly is not None: - save_info_for_assembly(space, b_assembly) - - - at unwrap_spec(assemblyname=str, namespace=str, classname=str) -def load_cli_class(space, assemblyname, namespace, classname): - """ - Load the given .NET class into the PyPy interpreter and return a - Python class referencing to it. - - Parameters: - - - namespace: the full name of the namespace containing the - class (e.g., ``System.Collections``). - - - classname: the name of the class in the specified namespace - (e.g. ``ArrayList``). 
""" - fullname = '%s.%s' % (namespace, classname) - w_cls = CliClassCache.get(fullname) - if w_cls is None: - w_cls = build_cli_class(space, namespace, classname, fullname, assemblyname) - CliClassCache.put(fullname, w_cls) - return w_cls - -def build_cli_class(space, namespace, classname, fullname, assemblyname): - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - b_type = System.Type.GetType(assembly_qualified_name) - if b_type is None: - raise operationerrfmt(space.w_ImportError, - "Cannot load .NET type: %s", fullname) - - # this is where we locate the interfaces inherited by the class - # set the flag hasIEnumerable if IEnumerable interface has been by the class - hasIEnumerable = b_type.GetInterface("System.Collections.IEnumerable") is not None - - # this is where we test if the class is Generic - # set the flag isClassGeneric - isClassGeneric = False - if b_type.get_IsGenericType(): - isClassGeneric = True - - w_staticmethods, w_methods = get_methods(space, b_type) - w_properties, w_indexers = get_properties(space, b_type) - return build_wrapper(space, - space.wrap(namespace), - space.wrap(classname), - space.wrap(assemblyname), - w_staticmethods, - w_methods, - w_properties, - w_indexers, - space.wrap(hasIEnumerable), - space.wrap(isClassGeneric)) - - -class W_CliObject(W_Root): - def __init__(self, space, b_obj): - self.space = space - self.b_obj = b_obj - - @unwrap_spec(name=str, startfrom=int) - def call_method(self, name, w_args, startfrom=0): - return call_method(self.space, self.b_obj, self.b_obj.GetType(), name, w_args, startfrom) - - at unwrap_spec(typename=str) -def cli_object_new(space, w_subtype, typename, w_args): - b_type = System.Type.GetType(typename) - b_args, b_paramtypes = rewrap_args(space, w_args, 0) - b_ctor = get_constructor(space, b_type, b_paramtypes) - try: - b_obj = b_ctor.Invoke(b_args) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - return space.wrap(W_CliObject(space, b_obj)) - -W_CliObject.typedef = TypeDef( - '_CliObject_internal', - __new__ = interp2app(cli_object_new), - call_method = interp2app(W_CliObject.call_method), - ) - -path, _ = os.path.split(__file__) -app_clr = os.path.join(path, 'app_clr.py') -app = ApplevelClass(file(app_clr).read()) -del path, app_clr -build_wrapper = app.interphook("build_wrapper") -wrapper_from_cliobj = app.interphook("wrapper_from_cliobj") diff --git a/pypy/module/clr/test/__init__.py b/pypy/module/clr/test/__init__.py deleted file mode 100644 --- a/pypy/module/clr/test/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# diff --git a/pypy/module/clr/test/test_clr.py b/pypy/module/clr/test/test_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_clr.py +++ /dev/null @@ -1,292 +0,0 @@ -from pypy.module.clr.assemblyname import mscorlib - -def skip_if_not_pythonnet(): - import py - try: - import clr - except ImportError: - py.test.skip('Must use pythonnet to access .NET libraries') - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def setup_class(cls): - cls.w_mscorlib = cls.space.wrap(mscorlib) - - def test_cliobject(self): - import clr - obj = clr._CliObject_internal('System.Collections.ArrayList', []) - max_index = obj.call_method('Add', [42]) - assert max_index == 0 - - def test_cache(self): - import clr - ArrayList = 
clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - ArrayList2 = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is ArrayList2 - - def test_load_fail(self): - import clr - raises(ImportError, clr.load_cli_class, self.mscorlib, 'Foo', 'Bar') - - def test_ArrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - obj.Add(43) - total = obj.get_Item(0) + obj.get_Item(1) - assert total == 42+43 - - def test_ArrayList_error(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - raises(StandardError, obj.get_Item, 0) - - def test_float_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42.0) - item = obj.get_Item(0) - assert isinstance(item, float) - - def test_bool_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(True) - obj.Add(False) - t = obj.get_Item(0) - f = obj.get_Item(1) - assert t and isinstance(t, bool) - assert not f and isinstance(f, bool) - obj.Add(42) - assert obj.Contains(42) - - def test_getitem(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj[0] == 42 - - def test_property(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj.Count == 1 - obj.Capacity = 10 - assert obj.Capacity == 10 - - def test_unboundmethod(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - ArrayList.Add(obj, 42) - assert obj.get_Item(0) == 42 - - def test_unboundmethod_typeerror(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - raises(TypeError, ArrayList.Add) - raises(TypeError, ArrayList.Add, 0) - - def test_overload(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - for i in range(10): - obj.Add(i) - assert obj.IndexOf(7) == 7 - assert obj.IndexOf(7, 0, 5) == -1 - - def test_wrong_overload(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - raises(TypeError, Math.Abs, "foo") - - def test_wrong_overload_ctor(self): - from System.Collections import ArrayList - raises(TypeError, ArrayList, "foo") - - def test_staticmethod(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - res = Math.Abs(-42) - assert res == 42 - assert type(res) is int - res = Math.Abs(-42.0) - assert res == 42.0 - assert type(res) is float - - def test_constructor_args(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList(42) - assert obj.Capacity == 42 - - def test_None_as_null(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - Hashtable = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Hashtable') - x = ArrayList() - x.Add(None) - assert x[0] is None - y = Hashtable() - assert y["foo"] is None - - def test_pass_opaque_arguments(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - class Foo: - pass - obj = 
Foo() - x = ArrayList() - x.Add(obj) - obj2 = x[0] - assert obj is obj2 - - def test_string_wrapping(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add("bar") - s = x[0] - assert s == "bar" - - def test_static_property(self): - import clr - import os - Environment = clr.load_cli_class(self.mscorlib, 'System', 'Environment') - assert Environment.CurrentDirectory == os.getcwd() - Environment.CurrentDirectory == '/' - assert Environment.CurrentDirectory == os.getcwd() - - def test_GetEnumerator(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - enum = x.GetEnumerator() - assert enum.MoveNext() is False - - def test_iteration_arrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add(1) - x.Add(2) - x.Add(3) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 1+2+3+4 - - def test_iteration_stack(self): - import clr - Stack = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Stack') - obj = Stack() - obj.Push(1) - obj.Push(54) - obj.Push(21) - sum = 0 - for i in obj: - sum += i - assert sum == 1+54+21 - - def test_load_generic_class(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - x.Add(42) - x.Add(4) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 42+4+4 - - def test_generic_class_typeerror(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - raises(TypeError, x.Add, "test") - - def test_generic_dict(self): - import clr - genDictIntStr = clr.load_cli_class(self.mscorlib, - "System.Collections.Generic", - "Dictionary`2[System.Int32,System.String]") - x = genDictIntStr() - x[1] = "test" - x[2] = "rest" - assert x[1] == "test" - assert x[2] == "rest" - raises(TypeError, x.__setitem__, 3, 3) - raises(TypeError, x.__setitem__, 4, 4.453) - raises(TypeError, x.__setitem__, "test", 3) - - def test_generic_metaclass_list(self): - import clr - from System.Collections.Generic import List - import System.Int32 - lst = List[System.Int32]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - lst = List[int]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - def test_generic_metaclass_dict(self): - import clr - from System.Collections.Generic import Dictionary - import System.Int32 - import System.String - d1 = Dictionary[System.Int32, System.String]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - d1 = Dictionary[int, str]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - def test_generic_metaclass_object(self): - import clr - from System.Collections.Generic import List - class Foo(object): - pass - lst = List[Foo]() - f = Foo() - lst.Add(f) - assert lst[0] is f - - def test_generic_metaclass_typeerror(self): - import clr - from System.Collections.Generic import List - raises(TypeError, "List[int, int]") - - def test_py2cli_cliobjects(self): - from System.IO import StreamReader, MemoryStream - mem = MemoryStream(100) - sr = StreamReader(mem) # does not raise - - def test_external_assemblies(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - from System.IO import StringReader - from System.Xml import XmlReader - buffer = 
StringReader("test") - xml = XmlReader.Create(buffer) - xml.ReadStartElement("foo") - assert xml.ReadString() == 'test' - xml.ReadEndElement() diff --git a/pypy/module/clr/test/test_importer.py b/pypy/module/clr/test/test_importer.py deleted file mode 100644 --- a/pypy/module/clr/test/test_importer.py +++ /dev/null @@ -1,76 +0,0 @@ -from pypy.module.clr.test.test_clr import skip_if_not_pythonnet - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def test_list_of_namespaces_and_classes(self): - import clr - ns, classes, generics = clr.get_assemblies_info() - - assert 'System' in ns - assert 'System.Collections' in ns - assert 'System.Runtime' in ns - assert 'System.Runtime.InteropServices' in ns - - assert 'System' not in classes - assert 'System.Math' in classes - assert 'System.Collections.ArrayList' in classes - - assert 'System.Collections.Generic.List' in classes - assert generics['System.Collections.Generic.List'] == 'System.Collections.Generic.List`1' - - def test_import_hook_simple(self): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - import clr - import System.Math - - assert System.Math.Abs(-5) == 5 - assert System.Math.Pow(2, 5) == 2**5 - - Math = clr.load_cli_class(mscorlib, 'System', 'Math') - assert Math is System.Math - - import System - a = System.Collections.Stack() - a.Push(3) - a.Push(44) - sum = 0 - for i in a: - sum += i - assert sum == 3+44 - - import System.Collections.ArrayList - ArrayList = clr.load_cli_class(mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is System.Collections.ArrayList - - def test_ImportError(self): - def fn(): - import non_existent_module - raises(ImportError, fn) - - def test_import_twice(self): - import System - s1 = System - import System - assert s1 is System - - def test_lazy_import(self): - import System - System.Runtime.InteropServices # does not raise attribute error - - def test_generic_class_import(self): - import System.Collections.Generic.List - - def test_import_from(self): - from System.Collections import ArrayList - - def test_AddReferenceByPartialName(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - import System.Xml.XmlReader # does not raise - - def test_AddReference_early(self): - import clr - clr.AddReferenceByPartialName('System.Xml') diff --git a/pypy/module/clr/test/test_interp_clr.py b/pypy/module/clr/test/test_interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_interp_clr.py +++ /dev/null @@ -1,10 +0,0 @@ -from pypy.module.clr.interp_clr import split_fullname - -def test_split_fullname(): - split = split_fullname - assert split('Foo') == ('', 'Foo') - assert split('System.Foo') == ('System', 'Foo') - assert split('System.Foo.Bar') == ('System.Foo', 'Bar') - assert split('System.Foo.A+B') == ('System.Foo', 'A+B') - assert split('System.') == ('System', '') - diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -4,26 +4,6 @@ import os exec 'import %s as posix' % os.name -# this is the list of function which is *not* present in the posix module of -# IronPython 2.6, and that we want to ignore for now -lltype_only_defs = [ - 'chown', 'chroot', 'closerange', 'confstr', 'confstr_names', 'ctermid', 'dup', - 'dup2', 'execv', 'execve', 'fchdir', 'fchmod', 'fchown', 'fdatasync', 'fork', - 'forkpty', 'fpathconf', 'fstatvfs', 'fsync', 'ftruncate', 'getegid', 'geteuid', - 'getgid', 'getgroups', 
'getloadavg', 'getlogin', 'getpgid', 'getpgrp', 'getppid', - 'getsid', 'getuid', 'kill', 'killpg', 'lchown', 'link', 'lseek', 'major', - 'makedev', 'minor', 'mkfifo', 'mknod', 'nice', 'openpty', 'pathconf', 'pathconf_names', - 'pipe', 'readlink', 'setegid', 'seteuid', 'setgid', 'setgroups', 'setpgid', 'setpgrp', - 'setregid', 'setreuid', 'setsid', 'setuid', 'stat_float_times', 'statvfs', - 'statvfs_result', 'symlink', 'sysconf', 'sysconf_names', 'tcgetpgrp', 'tcsetpgrp', - 'ttyname', 'uname', 'wait', 'wait3', 'wait4' -] - -# the Win32 urandom implementation isn't going to translate on JVM or CLI so -# we have to remove it -lltype_only_defs.append('urandom') - - class Module(MixedModule): """This module provides access to operating system functionality that is standardized by the C Standard and the POSIX standard (a thinly @@ -186,15 +166,6 @@ if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name - def __init__(self, space, w_name): - # if it's an ootype translation, remove all the defs that are lltype - # only - backend = space.config.translation.backend - if backend == 'cli' or backend == 'jvm' : - for name in lltype_only_defs: - self.interpleveldefs.pop(name, None) - MixedModule.__init__(self, space, w_name) - def startup(self, space): from pypy.module.posix import interp_posix interp_posix.get(space).startup(space) diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -210,18 +210,12 @@ # ____________________________________________________________ -# For LL backends, expose all fields. -# For OO backends, only the portable fields (the first 10). STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) -PORTABLE_STAT_FIELDS = unrolling_iterable( - enumerate(ll_os_stat.PORTABLE_STAT_FIELDS)) + STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) def build_stat_result(space, st): - if space.config.translation.type_system == 'ootype': - FIELDS = PORTABLE_STAT_FIELDS - else: - FIELDS = STAT_FIELDS # also when not translating at all + FIELDS = STAT_FIELDS # also when not translating at all lst = [None] * ll_os_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() stat_float_times = space.fromcache(StatState).stat_float_times @@ -513,11 +507,7 @@ def getstatfields(space): # for app_posix.py: export the list of 'st_xxx' names that we know # about at RPython level - if space.config.translation.type_system == 'ootype': - FIELDS = PORTABLE_STAT_FIELDS - else: - FIELDS = STAT_FIELDS # also when not translating at all - return space.newlist([space.wrap(name) for _, (name, _) in FIELDS]) + return space.newlist([space.wrap(name) for _, (name, _) in STAT_FIELDS]) class State: diff --git a/pypy/module/sys/interp_encoding.py b/pypy/module/sys/interp_encoding.py --- a/pypy/module/sys/interp_encoding.py +++ b/pypy/module/sys/interp_encoding.py @@ -3,12 +3,12 @@ from rpython.rlib.objectmodel import we_are_translated def getdefaultencoding(space): - """Return the current default string encoding used by the Unicode + """Return the current default string encoding used by the Unicode implementation.""" return space.wrap(space.sys.defaultencoding) def setdefaultencoding(space, w_encoding): - """Set the current default string encoding used by the Unicode + """Set the current default string encoding used by the Unicode implementation.""" encoding = space.str_w(w_encoding) mod = space.getbuiltinmodule("_codecs") @@ -37,10 +37,6 @@ base_encoding = None def 
_getfilesystemencoding(space): - if (space.config.translation.type_system == 'ootype'): - # XXX: fix this for ootype - return base_encoding - # encoding = base_encoding if rlocale.HAVE_LANGINFO and rlocale.CODESET: try: diff --git a/pypy/module/sys/system.py b/pypy/module/sys/system.py --- a/pypy/module/sys/system.py +++ b/pypy/module/sys/system.py @@ -57,4 +57,4 @@ return space.call_function(w_long_info, space.newtuple(info_w)) def get_float_repr_style(space): - return space.wrap("short" if rfloat.USE_SHORT_FLOAT_REPR else "legacy") + return space.wrap("short") diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -493,9 +493,6 @@ def unicode_startswith__Unicode_Unicode_ANY_ANY(space, w_self, w_substr, w_start, w_end): self, start, end = _convert_idx_params(space, w_self, w_start, w_end, True) - # XXX this stuff can be waaay better for ootypebased backends if - # we re-use more of our rpython machinery (ie implement startswith - # with additional parameters as rpython) return space.newbool(startswith(self, w_substr._value, start, end)) def unicode_startswith__Unicode_ANY_ANY_ANY(space, w_unistr, w_prefixes, diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -1,7 +1,7 @@ """ A file that invokes translation of PyPy with the JIT enabled. -Run it with py.test -s --pdb pypyjit.py [--ootype] +Run it with py.test -s --pdb pypyjit.py """ @@ -14,20 +14,9 @@ from rpython.rtyper.annlowlevel import llhelper, llstr, oostr, hlstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.ootypesystem import ootype from pypy.interpreter.pycode import PyCode from rpython.translator.goal import unixcheckpoint -if not hasattr(py.test.config.option, 'ootype'): - import sys - print >> sys.stderr, __doc__ - sys.exit(2) - -if py.test.config.option.ootype: - BACKEND = 'cli' -else: - BACKEND = 'c' - config = get_pypy_config(translating=True) config.translation.backendopt.inline_threshold = 0.1 config.translation.gc = 'boehm' @@ -45,15 +34,8 @@ # set_pypy_opt_level(config, level='jit') -if BACKEND == 'c': - config.objspace.std.multimethods = 'mrd' - multimethod.Installer = multimethod.InstallerVersion2 -elif BACKEND == 'cli': - config.objspace.std.multimethods = 'doubledispatch' - multimethod.Installer = multimethod.InstallerVersion1 - config.translation.backend = 'cli' -else: - assert False +config.objspace.std.multimethods = 'mrd' +multimethod.Installer = multimethod.InstallerVersion2 print config import sys, pdb @@ -82,13 +64,8 @@ code = ec.compiler.compile(source, filename, 'exec', 0) return llstr(space.str_w(dumps(space, code, space.wrap(2)))) -if BACKEND == 'c': - FPTR = lltype.Ptr(lltype.FuncType([], lltype.Ptr(STR))) - read_code_ptr = llhelper(FPTR, read_code) -else: - llstr = oostr - FUNC = ootype.StaticMethod([], ootype.String) - read_code_ptr = llhelper(FUNC, read_code) +FPTR = lltype.Ptr(lltype.FuncType([], lltype.Ptr(STR))) +read_code_ptr = llhelper(FPTR, read_code) def entry_point(): from pypy.module.marshal.interp_marshal import loads @@ -117,13 +94,8 @@ from rpython.jit.codewriter.codewriter import CodeWriter CodeWriter.debug = True - from rpython.jit.tl.pypyjit_child import run_child, run_child_ootype - if BACKEND == 'c': - run_child(globals(), locals()) - elif BACKEND == 'cli': - run_child_ootype(globals(), locals()) - else: - assert False + from 
rpython.jit.tl.pypyjit_child import run_child + run_child(globals(), locals()) if __name__ == '__main__': diff --git a/pypy/tool/pypyjit_child.py b/pypy/tool/pypyjit_child.py --- a/pypy/tool/pypyjit_child.py +++ b/pypy/tool/pypyjit_child.py @@ -19,14 +19,6 @@ apply_jit(interp, graph, LLtypeCPU) -def run_child_ootype(glob, loc): - import sys, pdb - interp = loc['interp'] - graph = loc['graph'] - from rpython.jit.backend.llgraph.runner import OOtypeCPU - apply_jit(interp, graph, OOtypeCPU) - - def apply_jit(interp, graph, CPUClass): print 'warmspot.jittify_and_run() started...' policy = PyPyJitPolicy() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -24,7 +24,6 @@ See description in doc/translation.txt.""" def __init__(self, translator=None, policy=None, bookkeeper=None): - import rpython.rtyper.ootypesystem.ooregistry # has side effects import rpython.rtyper.extfuncregistry # has side effects import rpython.rlib.nonconst # has side effects @@ -138,10 +137,10 @@ checkgraph(flowgraph) nbarg = len(flowgraph.getargs()) - if len(inputcells) != nbarg: - raise TypeError("%s expects %d args, got %d" %( + if len(inputcells) != nbarg: + raise TypeError("%s expects %d args, got %d" %( flowgraph, nbarg, len(inputcells))) - + # register the entry point self.addpendinggraph(flowgraph, inputcells) # recursively proceed until no more pending block is left @@ -267,7 +266,7 @@ pos = '?' if pos != '?': pos = self.whereami(pos) - + log.WARNING("%s/ %s" % (pos, msg)) @@ -297,7 +296,7 @@ v = graph.getreturnvar() try: return self.bindings[v] - except KeyError: + except KeyError: # the function didn't reach any return statement so far. # (some functions actually never do, they always raise exceptions) return annmodel.s_ImpossibleValue diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -866,12 +866,8 @@ # ____________________________________________________________ # annotation of low-level types -from rpython.annotator.model import SomePtr, SomeOOInstance, SomeOOClass -from rpython.annotator.model import SomeOOObject +from rpython.annotator.model import SomePtr from rpython.annotator.model import ll_to_annotation, annotation_to_lltype -from rpython.rtyper.ootypesystem import ootype - -_make_none_union('SomeOOInstance', 'ootype=obj.ootype, can_be_None=True') class __extend__(pairtype(SomePtr, SomePtr)): def union((p1, p2)): @@ -912,41 +908,6 @@ return pair(p2, obj).union() -class __extend__(pairtype(SomeOOInstance, SomeOOInstance)): - def union((r1, r2)): - common = ootype.commonBaseclass(r1.ootype, r2.ootype) - assert common is not None, 'Mixing of incompatible instances %r, %r' %(r1.ootype, r2.ootype) - return SomeOOInstance(common, can_be_None=r1.can_be_None or r2.can_be_None) - -class __extend__(pairtype(SomeOOClass, SomeOOClass)): - def union((r1, r2)): - if r1.ootype is None: - common = r2.ootype - elif r2.ootype is None: - common = r1.ootype - elif r1.ootype == r2.ootype: - common = r1.ootype - elif isinstance(r1.ootype, ootype.Instance) and isinstance(r2.ootype, ootype.Instance): - common = ootype.commonBaseclass(r1.ootype, r2.ootype) - assert common is not None, ('Mixing of incompatible classes %r, %r' - % (r1.ootype, r2.ootype)) - else: - common = ootype.Object - return SomeOOClass(common) - -class __extend__(pairtype(SomeOOInstance, SomeObject)): - def union((r, obj)): - assert False, ("mixing 
reference type %r with something else %r" % (r.ootype, obj)) - -class __extend__(pairtype(SomeObject, SomeOOInstance)): - def union((obj, r2)): - return pair(r2, obj).union() - -class __extend__(pairtype(SomeOOObject, SomeOOObject)): - def union((r1, r2)): - assert r1.ootype is ootype.Object and r2.ootype is ootype.Object - return SomeOOObject() - #_________________________________________ # weakrefs diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -7,13 +7,12 @@ import sys, types, inspect, weakref from rpython.flowspace.model import Constant -from rpython.annotator.model import SomeString, SomeChar, SomeFloat, \ - SomePtr, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, \ - SomeInteger, SomeOOInstance, SomeOOObject, TLS, SomeAddress, \ - SomeUnicodeCodePoint, SomeOOStaticMeth, s_None, s_ImpossibleValue, \ - SomeLLADTMeth, SomeBool, SomeTuple, SomeOOClass, SomeImpossibleValue, \ - SomeUnicodeString, SomeList, HarmlesslyBlocked, \ - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray +from rpython.annotator.model import ( + SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, + s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, + SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, + SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -23,7 +22,6 @@ from rpython.rlib.objectmodel import r_dict, Symbolic from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.ootypesystem import ootype from rpython.rtyper import extregistry @@ -433,16 +431,6 @@ result = SomePtr(lltype.typeOf(x)) elif isinstance(x, llmemory.fakeaddress): result = SomeAddress() - elif isinstance(x, ootype._static_meth): - result = SomeOOStaticMeth(ootype.typeOf(x)) - elif isinstance(x, ootype._class): - result = SomeOOClass(x._INSTANCE) # NB. 
can be None - elif isinstance(x, ootype.instance_impl): # XXX - result = SomeOOInstance(ootype.typeOf(x)) - elif isinstance(x, (ootype._record, ootype._string)): - result = SomeOOInstance(ootype.typeOf(x)) - elif isinstance(x, (ootype._object)): - result = SomeOOObject() elif tp is type: if (x is type(None) or # add cases here if needed x.__module__ == 'rpython.rtyper.lltypesystem.lltype'): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -3,20 +3,15 @@ """ import sys -from rpython.annotator.model import SomeInteger, SomeObject, SomeChar, SomeBool -from rpython.annotator.model import SomeString, SomeTuple, s_Bool -from rpython.annotator.model import SomeUnicodeCodePoint, SomeAddress -from rpython.annotator.model import SomeFloat, unionof, SomeUnicodeString -from rpython.annotator.model import SomePBC, SomeInstance, SomeDict, SomeList -from rpython.annotator.model import SomeWeakRef, SomeIterator -from rpython.annotator.model import SomeOOObject, SomeByteArray -from rpython.annotator.model import annotation_to_lltype, lltype_to_annotation, ll_to_annotation -from rpython.annotator.model import add_knowntypedata -from rpython.annotator.model import s_ImpossibleValue +from rpython.annotator.model import ( + SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, + SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, + SomeByteArray, annotation_to_lltype, lltype_to_annotation, + ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant -from rpython.tool.error import AnnotatorError import rpython.rlib.rarithmetic import rpython.rlib.objectmodel @@ -490,7 +485,7 @@ return SomePtr(ll_ptrtype=PtrT.const) def identityhash(s_obj): - assert isinstance(s_obj, (SomePtr, SomeOOObject, SomeOOInstance)) + assert isinstance(s_obj, SomePtr) return SomeInteger() def getRuntimeTypeInfo(T): @@ -523,92 +518,6 @@ BUILTIN_ANALYZERS[lltype.runtime_type_info] = runtime_type_info BUILTIN_ANALYZERS[lltype.Ptr] = constPtr -# ootype -from rpython.annotator.model import SomeOOInstance, SomeOOClass, SomeOOStaticMeth -from rpython.rtyper.ootypesystem import ootype - -def new(I): - assert I.is_constant() - i = ootype.new(I.const) - r = SomeOOInstance(ootype.typeOf(i)) - return r - -def oonewarray(s_type, length): - assert s_type.is_constant() - return SomeOOInstance(s_type.const) - -def null(I_OR_SM): - assert I_OR_SM.is_constant() - null = ootype.null(I_OR_SM.const) - r = lltype_to_annotation(ootype.typeOf(null)) - return r - -def instanceof(i, I): - assert I.is_constant() - assert isinstance(I.const, ootype.Instance) - return s_Bool - -def classof(i): - assert isinstance(i, SomeOOInstance) - return SomeOOClass(i.ootype) - -def subclassof(class1, class2): - assert isinstance(class1, SomeOOClass) - assert isinstance(class2, SomeOOClass) - return s_Bool - -def runtimenew(c): - assert isinstance(c, SomeOOClass) - if c.ootype is None: - return s_ImpossibleValue # can't call runtimenew(NULL) - else: - return SomeOOInstance(c.ootype) - -def ooupcast(I, i): - assert isinstance(I.const, ootype.Instance) - if ootype.isSubclass(i.ootype, I.const): - return SomeOOInstance(I.const) - else: - raise AnnotatorError, 'Cannot cast %s to %s' % (i.ootype, I.const) - -def oodowncast(I, i): - 
assert isinstance(I.const, ootype.Instance) - if ootype.isSubclass(I.const, i.ootype): - return SomeOOInstance(I.const) - else: - raise AnnotatorError, 'Cannot cast %s to %s' % (i.ootype, I.const) - -def cast_to_object(obj): - assert isinstance(obj, SomeOOStaticMeth) or \ - (isinstance(obj, SomeOOClass) and obj.ootype is None) or \ - isinstance(obj.ootype, ootype.OOType) - return SomeOOObject() - -def cast_from_object(T, obj): - TYPE = T.const - if TYPE is ootype.Object: - return SomeOOObject() - elif TYPE is ootype.Class: - return SomeOOClass(ootype.ROOT) # ??? - elif isinstance(TYPE, ootype.StaticMethod): - return SomeOOStaticMeth(TYPE) - elif isinstance(TYPE, ootype.OOType): - return SomeOOInstance(TYPE) - else: - raise AnnotatorError, 'Cannot cast Object to %s' % TYPE - -BUILTIN_ANALYZERS[ootype.instanceof] = instanceof -BUILTIN_ANALYZERS[ootype.new] = new -BUILTIN_ANALYZERS[ootype.oonewarray] = oonewarray -BUILTIN_ANALYZERS[ootype.null] = null -BUILTIN_ANALYZERS[ootype.runtimenew] = runtimenew -BUILTIN_ANALYZERS[ootype.classof] = classof -BUILTIN_ANALYZERS[ootype.subclassof] = subclassof -BUILTIN_ANALYZERS[ootype.ooupcast] = ooupcast -BUILTIN_ANALYZERS[ootype.oodowncast] = oodowncast -BUILTIN_ANALYZERS[ootype.cast_to_object] = cast_to_object -BUILTIN_ANALYZERS[ootype.cast_from_object] = cast_from_object - #________________________________ # weakrefs diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -603,36 +603,6 @@ return False -class SomeOOObject(SomeObject): - def __init__(self): - from rpython.rtyper.ootypesystem import ootype - self.ootype = ootype.Object - - -class SomeOOClass(SomeObject): - def __init__(self, ootype): - self.ootype = ootype - - -class SomeOOInstance(SomeObject): - def __init__(self, ootype, can_be_None=False): - self.ootype = ootype - self.can_be_None = can_be_None - - -class SomeOOBoundMeth(SomeObject): - immutable = True - - def __init__(self, ootype, name): - self.ootype = ootype - self.name = name - - -class SomeOOStaticMeth(SomeObject): - immutable = True - - def __init__(self, method): - self.method = method annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), @@ -647,16 +617,6 @@ def annotation_to_lltype(s_val, info=None): - from rpython.rtyper.ootypesystem import ootype - - if isinstance(s_val, SomeOOInstance): - return s_val.ootype - if isinstance(s_val, SomeOOStaticMeth): - return s_val.method - if isinstance(s_val, SomeOOClass): - return ootype.Class - if isinstance(s_val, SomeOOObject): - return s_val.ootype if isinstance(s_val, SomeInteriorPtr): p = s_val.ll_ptrtype if 0 in p.offsets: @@ -683,8 +643,6 @@ def lltype_to_annotation(T): - from rpython.rtyper.ootypesystem import ootype - try: s = ll_to_annotation_map.get(T) except TypeError: @@ -694,14 +652,6 @@ return lltype_to_annotation(T.OF) if isinstance(T, lltype.Number): return SomeInteger(knowntype=T._type) - if isinstance(T, (ootype.Instance, ootype.BuiltinType)): - return SomeOOInstance(T) - elif isinstance(T, ootype.StaticMethod): - return SomeOOStaticMeth(T) - elif T == ootype.Class: - return SomeOOClass(ootype.ROOT) - elif T == ootype.Object: - return SomeOOObject() elif isinstance(T, lltype.InteriorPtr): return SomeInteriorPtr(T) else: diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -2,7 +2,6 @@ from rpython.annotator.model import * from rpython.annotator.listdef 
import ListDef -from rpython.rtyper.ootypesystem import ootype listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -57,11 +56,11 @@ (s6, s6)]) def test_commonbase_simple(): - class A0: + class A0: pass - class A1(A0): + class A1(A0): pass - class A2(A0): + class A2(A0): pass class B1(object): pass @@ -73,10 +72,10 @@ except TypeError: # if A0 is also a new-style class, e.g. in PyPy class B3(A0, object): pass - assert commonbase(A1,A2) is A0 + assert commonbase(A1,A2) is A0 assert commonbase(A1,A0) is A0 assert commonbase(A1,A1) is A1 - assert commonbase(A2,B2) is object + assert commonbase(A2,B2) is object assert commonbase(A2,B3) is A0 def test_list_union(): @@ -115,9 +114,6 @@ assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) s_p = ll_to_annotation(lltype.malloc(A, 0)) assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) - C = ootype.Instance('C', ootype.ROOT, {}) - s_p = ll_to_annotation(ootype.new(C)) - assert isinstance(s_p, SomeOOInstance) and s_p.ootype == C def test_annotation_to_lltype(): from rpython.rlib.rarithmetic import r_uint, r_singlefloat @@ -125,8 +121,8 @@ s_pos = SomeInteger(nonneg=True) s_1 = SomeInteger(nonneg=True); s_1.const = 1 s_m1 = SomeInteger(nonneg=False); s_m1.const = -1 - s_u = SomeInteger(nonneg=True, unsigned=True); - s_u1 = SomeInteger(nonneg=True, unsigned=True); + s_u = SomeInteger(nonneg=True, unsigned=True); + s_u1 = SomeInteger(nonneg=True, unsigned=True); s_u1.const = r_uint(1) assert annotation_to_lltype(s_i) == lltype.Signed assert annotation_to_lltype(s_pos) == lltype.Signed @@ -140,13 +136,10 @@ s_p = SomePtr(ll_ptrtype=PS) assert annotation_to_lltype(s_p) == PS py.test.raises(ValueError, "annotation_to_lltype(si0)") - C = ootype.Instance('C', ootype.ROOT, {}) - ref = SomeOOInstance(C) - assert annotation_to_lltype(ref) == C s_singlefloat = SomeSingleFloat() s_singlefloat.const = r_singlefloat(0.0) assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat - + def test_ll_union(): PS1 = lltype.Ptr(lltype.GcStruct('s')) PS2 = lltype.Ptr(lltype.GcStruct('s')) @@ -172,29 +165,6 @@ py.test.raises(AssertionError, "unionof(SomeInteger(), SomePtr(PS1))") py.test.raises(AssertionError, "unionof(SomeObject(), SomePtr(PS1))") -def test_oo_union(): - C1 = ootype.Instance("C1", ootype.ROOT) - C2 = ootype.Instance("C2", C1) - C3 = ootype.Instance("C3", C1) - D = ootype.Instance("D", ootype.ROOT) - assert unionof(SomeOOInstance(C1), SomeOOInstance(C1)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C1), SomeOOInstance(C2)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C2), SomeOOInstance(C1)) == SomeOOInstance(C1) - assert unionof(SomeOOInstance(C2), SomeOOInstance(C3)) == SomeOOInstance(C1) - - assert unionof(SomeOOInstance(C1),SomeImpossibleValue()) == SomeOOInstance(C1) - assert unionof(SomeImpossibleValue(), SomeOOInstance(C1)) == SomeOOInstance(C1) - - assert unionof(SomeOOInstance(C1), SomeOOInstance(D)) == SomeOOInstance(ootype.ROOT) - -def test_ooclass_array_contains(): - A = ootype.Array(ootype.Signed) - cls = ootype.runtimeClass(A) - s1 = SomeOOClass(A) - s2 = SomeOOClass(A) - s2.const=cls - assert s1.contains(s2) - def test_nan(): f1 = SomeFloat() f1.const = float("nan") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -756,7 +756,6 @@ From noreply at buildbot.pypy.org Sun Jul 28 11:12:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 11:12:16 
+0200 (CEST) Subject: [pypy-commit] pypy default: document the merge Message-ID: <20130728091216.DA6521C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65730:6993c76a8bc6 Date: 2013-07-28 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/6993c76a8bc6/ Log: document the merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -46,3 +46,5 @@ .. branch: ndarray-subtype Allow subclassing ndarray, i.e. matrix + +.. branch: kill-ootype From noreply at buildbot.pypy.org Sun Jul 28 11:44:37 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 28 Jul 2013 11:44:37 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130728094437.380161C00D8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65731:d6b1adc23375 Date: 2013-07-27 21:08 +0200 http://bitbucket.org/pypy/pypy/changeset/d6b1adc23375/ Log: Fix. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -91,11 +91,11 @@ return ch def _join_return_one(self, space, w_obj): - return space.is_w(space.type(w_obj), space.w_unicode) + return False def _join_check_item(self, space, w_obj): - if (space.is_w(space.type(w_obj), space.w_str) or - space.is_w(space.type(w_obj), space.w_bytearray)): + if (space.isinstance_w(w_obj, space.w_str) or + space.isinstance_w(w_obj, space.w_bytearray)): return 0 return 1 From noreply at buildbot.pypy.org Sun Jul 28 11:44:38 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 28 Jul 2013 11:44:38 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Don't share an empty bytearray. Message-ID: <20130728094438.89A1F1C030B@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65732:6c6d25371f1f Date: 2013-07-27 23:02 +0200 http://bitbucket.org/pypy/pypy/changeset/6c6d25371f1f/ Log: Don't share an empty bytearray. 
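The reasoning behind this commit: a bytearray is mutable, so handing out one shared
W_BytearrayObject([]) for every empty result would let an in-place mutation of one "empty"
value leak into all the others, whereas the immutable str and unicode types can keep
returning their shared EMPTY instance. A minimal pure-Python sketch of the hazard, using
plain bytearray as a stand-in for the interpreter-level class (the helper names here are
illustrative, not the real methods):

    # Sketch only: plain bytearray stands in for W_BytearrayObject.
    SHARED_EMPTY = bytearray()       # the old shared, mutable EMPTY

    def empty_shared():
        return SHARED_EMPTY          # every caller gets the same object

    def empty_fresh():
        return bytearray()           # what the new _empty() does instead

    a = empty_shared()
    b = empty_shared()
    a.extend(b"oops")                # mutate one "empty" result...
    assert b == b"oops"              # ...and every other one changes too

    c = empty_fresh()
    d = empty_fresh()
    c.extend(b"ok")
    assert d == bytearray()          # fresh objects stay independent

Hence the diff below gives W_BytearrayObject an _empty() that builds a new object on every
call, while W_BytesObject and W_UnicodeObject keep returning their shared EMPTY.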
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -34,6 +34,12 @@ def _new(self, value): return W_BytearrayObject(_make_data(value)) + def _new_from_list(self, value): + return W_BytearrayObject(value) + + def _empty(self): + return W_BytearrayObject([]) + def _len(self): return len(self.data) @@ -47,7 +53,6 @@ assert len(char) == 1 return str(char)[0] - _empty = '' _builder = StringBuilder def _newlist_unwrapped(self, space, res): @@ -270,8 +275,6 @@ def descr_reverse(self, space): self.data.reverse() -W_BytearrayObject.EMPTY = W_BytearrayObject([]) - bytearray_append = SMM('append', 2) bytearray_extend = SMM('extend', 2) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -72,6 +72,12 @@ def _new(self, value): return W_BytesObject(value) + def _new_from_list(self, value): + return W_BytesObject(''.join(value)) + + def _empty(self): + return W_BytesObject.EMPTY + def _len(self): return len(self._value) @@ -86,7 +92,6 @@ assert len(char) == 1 return str(char)[0] - _empty = '' _builder = StringBuilder def _isupper(self, ch): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -127,7 +127,7 @@ return space.w_NotImplemented raise if times <= 0: - return self.EMPTY + return self._empty() if self._len() == 1: return self._new(self._val(space)[0] * times) return self._new(self._val(space) * times) @@ -139,13 +139,13 @@ length = len(selfvalue) start, stop, step, sl = w_index.indices4(space, length) if sl == 0: - return self.EMPTY + return self._empty() elif step == 1: assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - str = self._empty.join([selfvalue[start + i*step] for i in range(sl)]) - return self._new(str) + ret = [selfvalue[start + i*step] for i in range(sl)] + return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") selfvalue = self._val(space) @@ -167,7 +167,7 @@ start, stop = normalize_simple_slice(space, len(selfvalue), w_start, w_stop) if start == stop: - return self.EMPTY + return self._empty() else: return self._sliced(space, selfvalue, start, stop, self) @@ -175,7 +175,7 @@ def descr_capitalize(self, space): value = self._val(space) if len(value) == 0: - return self.EMPTY + return self._empty() builder = self._builder(len(value)) builder.append(self._upper(value[0])) @@ -228,7 +228,7 @@ def descr_expandtabs(self, space, tabsize=8): value = self._val(space) if not value: - return self.EMPTY + return self._empty() splitted = value.split(self._chr('\t')) try: @@ -397,7 +397,7 @@ size = len(list_w) if size == 0: - return self.EMPTY + return self._empty() if size == 1: w_s = list_w[0] @@ -484,7 +484,7 @@ space.wrap("empty separator")) pos = value.find(sub) if pos == -1: - return space.newtuple([self, self.EMPTY, self.EMPTY]) + return space.newtuple([self, self._empty(), self._empty()]) else: from pypy.objspace.std.bytearrayobject import W_BytearrayObject if isinstance(self, W_BytearrayObject): @@ -502,7 +502,7 @@ space.wrap("empty separator")) pos = value.rfind(sub) if pos == -1: - return space.newtuple([self.EMPTY, self.EMPTY, self]) + return space.newtuple([self._empty(), self._empty(), self]) else: from pypy.objspace.std.bytearrayobject import 
W_BytearrayObject if isinstance(self, W_BytearrayObject): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -72,6 +72,12 @@ def _new(self, value): return W_UnicodeObject(value) + def _new_from_list(self, value): + return W_UnicodeObject(u''.join(value)) + + def _empty(self): + return W_UnicodeObject.EMPTY + def _len(self): return len(self._value) @@ -87,7 +93,6 @@ assert len(char) == 1 return unicode(char)[0] - _empty = u'' _builder = UnicodeBuilder def _isupper(self, ch): From noreply at buildbot.pypy.org Sun Jul 28 11:48:26 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 28 Jul 2013 11:48:26 +0200 (CEST) Subject: [pypy-commit] pypy default: revert setting cppflags and ldflagsto original appraoch, this doesnt break setuptools monkeypatching distutils Message-ID: <20130728094826.5F2D51C00D8@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: Changeset: r65733:9fad3a8b4208 Date: 2013-07-28 01:08 +0200 http://bitbucket.org/pypy/pypy/changeset/9fad3a8b4208/ Log: revert setting cppflags and ldflagsto original appraoch, this doesnt break setuptools monkeypatching distutils diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Sun Jul 28 11:48:27 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 11:48:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in squeaky/pypy (pull request #169) Message-ID: <20130728094827.B2AD61C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65734:ba26aca2e830 Date: 2013-07-28 11:47 +0200 http://bitbucket.org/pypy/pypy/changeset/ba26aca2e830/ Log: Merged in squeaky/pypy (pull request #169) revert setting cppflags and ldflagsto original appraoch, this doesnt break setuptools monkeypatching distutils diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Sun Jul 28 12:59:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 12:59:43 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: x86-32: don't put anything in the stack before esp, because these locations may be overritten randomly Message-ID: <20130728105943.0FCBB1C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65735:3360d9f154d9 Date: 2013-07-28 12:56 +0200 http://bitbucket.org/pypy/pypy/changeset/3360d9f154d9/ Log: x86-32: don't put anything in the stack before esp, because these locations may be overritten randomly diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -14,7 +14,7 @@ from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - gpr_reg_mgr_cls, xmm_reg_mgr_cls) + gpr_reg_mgr_cls, xmm_reg_mgr_cls, _register_arguments) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, @@ -163,7 +163,8 @@ # the caller is responsible for putting arguments in the right spot mc.SUB(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 8 * WORD) - # args are in their respective positions + for i in range(4): + mc.MOV_sr(i * WORD, _register_arguments[i].value) mc.CALL(eax) if IS_X86_64: mc.ADD(esp, imm(WORD)) @@ -2160,7 +2161,7 @@ def label(self): self._check_frame_depth_debug(self.mc) - def cond_call(self, op, gcmap, cond_loc, call_loc, arglocs): + def cond_call(self, op, gcmap, cond_loc, call_loc): self.mc.TEST(cond_loc, cond_loc) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() @@ -2176,11 +2177,6 @@ if self._regalloc.xrm.reg_bindings: floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] - if 
IS_X86_32: - p = -8 * WORD - for loc in arglocs: - self.mc.MOV(RawEspLoc(p, INT), loc) - p += WORD self.mc.CALL(imm(cond_call_adr)) self.pop_gcmap(self.mc) # never any result value diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -56,7 +56,6 @@ no_lower_byte_regs = [] save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10] - register_arguments = [edi, esi, edx, ecx] class X86XMMRegisterManager(RegisterManager): @@ -120,6 +119,9 @@ for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs): gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i +_register_arguments = [edi, esi, edx, ecx] + + class RegAlloc(BaseRegalloc): def __init__(self, assembler, translate_support_code=False): @@ -809,18 +811,12 @@ imm = self.rm.convert_to_imm(v) self.assembler.regalloc_mov(imm, eax) args_so_far = [tmpbox] - locs = [] for i in range(2, len(args)): - if IS_X86_64: - reg = self.rm.register_arguments[i - 2] - self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) - else: - loc = self.make_sure_var_in_reg(args[i], args_so_far) - locs.append(loc) + reg = _register_arguments[i - 2] + self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) args_so_far.append(args[i]) loc_cond = self.make_sure_var_in_reg(args[0], args) - self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax, - locs) + self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) self.rm.possibly_free_var(tmpbox) def consider_call_malloc_nursery(self, op): From noreply at buildbot.pypy.org Sun Jul 28 13:24:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 13:24:35 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: hg merge default Message-ID: <20130728112435.5DE0F1C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65736:515200fc2957 Date: 2013-07-28 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/515200fc2957/ Log: hg merge default diff too long, truncating to 2000 out of 45836 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" 
vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. 
_`optimization level`: config/opt.html diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -3,14 +3,20 @@ =============== We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. -This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html Highlights ========== +* Support for os.statvfs and os.fstatvfs on unix systems. + * Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). -* Fixed issue `1552`_: GreenletExit should inherit from BaseException +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. * Fixed issue `1537`_: numpypy __array_interface__ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -32,8 +32,19 @@ .. branch: ssl_moving_write_buffer +.. branch: pythoninspect-fix +Make PyPy respect PYTHONINSPECT variable set via os.putenv in the same process +to start interactive prompt when the script execution finishes. This adds +new __pypy__.os.real_getenv call that bypasses Python cache and looksup env +in the underlying OS. Translatorshell now works on PyPy. + .. branch: add-statvfs Added os.statvfs and os.fstatvfs .. branch: statvfs_tests Added some addition tests for statvfs. + +.. branch: ndarray-subtype +Allow subclassing ndarray, i.e. matrix + +.. branch: kill-ootype diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -234,9 +234,6 @@ from pypy.config.pypyoption import enable_translationmodules enable_translationmodules(config) - ## if config.translation.type_system == 'ootype': - ## config.objspace.usemodules.suggest(rbench=True) - config.translation.suggest(check_str_without_nul=True) if config.translation.thread: @@ -271,12 +268,6 @@ elif config.objspace.usemodules.pypyjit: config.translation.jit = True - if config.translation.backend == "cli": - config.objspace.usemodules.clr = True - # XXX did it ever work? - #elif config.objspace.usemodules.clr: - # config.translation.backend == "cli" - if config.translation.sandbox: config.objspace.lonepycfiles = False config.objspace.usepycfiles = False @@ -292,16 +283,6 @@ wrapstr = 'space.wrap(%r)' % (options) pypy.module.sys.Module.interpleveldefs['pypy_translation_info'] = wrapstr - if config.translation.backend in ["cli", "jvm"] and sys.platform == "win32": - # HACK: The ftruncate implementation in streamio.py which is used for the Win32 platform - # is specific for the C backend and can't be generated on CLI or JVM. Because of that, - # we have to patch it out. - from rpython.rlib import streamio - def ftruncate_win32_dummy(fd, size): pass - def _setfd_binary_dummy(fd): pass - streamio.ftruncate_win32 = ftruncate_win32_dummy - streamio._setfd_binary = _setfd_binary_dummy - return self.get_entry_point(config) def jitpolicy(self, driver): diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -556,8 +556,15 @@ # or # * PYTHONINSPECT is set and stdin is a tty. 
# + try: + # we need a version of getenv that bypasses Python caching + from __pypy__.os import real_getenv + except ImportError: + # dont fail on CPython here + real_getenv = os.getenv + return (interactive or - ((inspect or (readenv and os.getenv('PYTHONINSPECT'))) + ((inspect or (readenv and real_getenv('PYTHONINSPECT'))) and sys.stdin.isatty())) success = True diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -896,6 +896,21 @@ assert False, (3,) except AssertionError, e: assert str(e) == "(3,)" + + # BUILD_LIST_FROM_ARG is PyPy specific + @py.test.mark.skipif('config.option.runappdirect') + def test_build_list_from_arg_length_hint(self): + hint_called = [False] + class Foo(object): + def __length_hint__(self): + hint_called[0] = True + return 5 + def __iter__(self): + for i in range(5): + yield i + l = [a for a in Foo()] + assert hint_called[0] + assert l == list(range(5)) class TestOptimizations: diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -414,8 +414,8 @@ closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): raise operationerrfmt(space.w_ValueError, - "%s() requires a code object with %d free vars, not %d", - self.name, closure_len, len(code.co_freevars)) + "%N() requires a code object with %d free vars, not %d", + self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -482,7 +482,6 @@ space.abstract_isinstance_w(w_firstarg, self.w_class)): pass # ok else: - myname = self.getname(space, "") clsdescr = self.w_class.getname(space, "") if clsdescr: clsdescr += " instance" @@ -497,10 +496,10 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %s() must be called with %s " + msg = ("unbound method %N() must be called with %s " "as first argument (got %s instead)") raise operationerrfmt(space.w_TypeError, msg, - myname, clsdescr, instdescr) + self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -707,16 +707,17 @@ self.pushvalue(w_list) def BUILD_LIST_FROM_ARG(self, _, next_instr): + space = self.space # this is a little dance, because list has to be before the # value last_val = self.popvalue() + length_hint = 0 try: - lgt = self.space.len_w(last_val) - except OperationError, e: - if e.async(self.space): + length_hint = space.length_hint(last_val, length_hint) + except OperationError as e: + if e.async(space): raise - lgt = 0 # oh well - self.pushvalue(self.space.newlist([], sizehint=lgt)) + self.pushvalue(space.newlist([], sizehint=length_hint)) self.pushvalue(last_val) def LOAD_ATTR(self, nameindex, next_instr): diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -48,7 +48,7 @@ pdir = _get_next_path(ext='') p = pdir.ensure(dir=1).join('__main__.py') p.write(str(py.code.Source(source))) - # return relative path for testing purposes + # return relative path for testing purposes return py.path.local().bestrelpath(pdir) demo_script 
= getscript(""" @@ -706,6 +706,20 @@ assert 'hello world\n' in data assert '42\n' in data + def test_putenv_fires_interactive_within_process(self): + try: + import __pypy__ + except ImportError: + py.test.skip("This can be only tested on PyPy with real_getenv") + + # should be noninteractive when piped in + data = 'import os\nos.putenv("PYTHONINSPECT", "1")\n' + self.run('', senddata=data, expect_prompt=False) + + # should go interactive with -c + data = data.replace('\n', ';') + self.run("-c '%s'" % data, expect_prompt=True) + def test_option_S_copyright(self): data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data @@ -971,7 +985,7 @@ pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') app_main.setup_bootstrap_path(pypy_c) newpath = sys.path[:] - # we get at least lib_pypy + # we get at least lib_pypy # lib-python/X.Y.Z, and maybe more (e.g. plat-linux2) assert len(newpath) >= 2 for p in newpath: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -553,9 +553,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%s' for '%s' objects doesn't apply to '%T' object" + m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" raise operationerrfmt(space.w_TypeError, m, - self.name, self.w_cls.name, w_obj) + self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -50,6 +50,13 @@ } +class OsModule(MixedModule): + appleveldefs = {} + interpleveldefs = { + 'real_getenv': 'interp_os.real_getenv' + } + + class Module(MixedModule): appleveldefs = { } @@ -82,6 +89,7 @@ "time": TimeModule, "thread": ThreadModule, "intop": IntOpModule, + "os": OsModule, } def setup_after_space_initialization(self): diff --git a/pypy/module/__pypy__/interp_os.py b/pypy/module/__pypy__/interp_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/interp_os.py @@ -0,0 +1,9 @@ +import os + +from pypy.interpreter.gateway import unwrap_spec + + + at unwrap_spec(name='str0') +def real_getenv(space, name): + """Get an OS environment value skipping Python cache""" + return space.wrap(os.environ.get(name)) diff --git a/pypy/module/__pypy__/test/test_os.py b/pypy/module/__pypy__/test/test_os.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_os.py @@ -0,0 +1,16 @@ +class AppTestOs: + spaceconfig = dict(usemodules=['__pypy__']) + + def test_real_getenv(self): + import __pypy__.os + import os + + key = 'UNLIKELY_SET' + assert key not in os.environ + os.putenv(key, '42') + # this one skips Python cache + assert __pypy__.os.real_getenv(key) == '42' + # this one can only see things set on interpter start (cached) + assert os.getenv(key) is None + os.unsetenv(key) + assert __pypy__.os.real_getenv(key) is None diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -4,6 +4,7 @@ from pypy.interpreter.error import operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror from rpython.rtyper.lltypesystem import rffi from 
rpython.rlib.rdynload import DLLHANDLE, dlopen, dlsym, dlclose, DLOpenError @@ -24,9 +25,7 @@ try: self.handle = dlopen(ll_libname, flags) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, - "cannot load library %s: %s", - filename, e.msg) + raise wrap_dlopenerror(space, e, filename) self.name = filename def __del__(self): diff --git a/pypy/module/_codecs/__init__.py b/pypy/module/_codecs/__init__.py --- a/pypy/module/_codecs/__init__.py +++ b/pypy/module/_codecs/__init__.py @@ -18,10 +18,10 @@ The builtin Unicode codecs use the following interface: - _encode(Unicode_object[,errors='strict']) -> + _encode(Unicode_object[,errors='strict']) -> (string object, bytes consumed) - _decode(char_buffer_obj[,errors='strict']) -> + _decode(char_buffer_obj[,errors='strict']) -> (Unicode object, bytes consumed) _encode() interfaces also accept non-Unicode object as @@ -90,8 +90,7 @@ "NOT_RPYTHON" # mbcs codec is Windows specific, and based on rffi. - if (hasattr(runicode, 'str_decode_mbcs') and - space.config.translation.type_system != 'ootype'): + if (hasattr(runicode, 'str_decode_mbcs')): self.interpleveldefs['mbcs_encode'] = 'interp_codecs.mbcs_encode' self.interpleveldefs['mbcs_decode'] = 'interp_codecs.mbcs_decode' diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_ffi/interp_funcptr.py @@ -14,7 +14,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error +from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os if os.name == 'nt': @@ -324,8 +324,7 @@ try: self.cdll = libffi.CDLL(name, mode) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', self.name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, self.name) def getfunc(self, space, w_name, w_argtypes, w_restype): return _getfunc(space, self, w_name, w_argtypes, w_restype) diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -12,18 +12,6 @@ "set_file_encoding": "interp_file.set_file_encoding", } - def __init__(self, space, *args): - "NOT_RPYTHON" - - # on windows with oo backends, remove file.truncate, - # because the implementation is based on rffi - if (sys.platform == 'win32' and - space.config.translation.type_system == 'ootype'): - from pypy.module._file.interp_file import W_File - del W_File.typedef.rawdict['truncate'] - - MixedModule.__init__(self, space, *args) - def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. 
from pypy.module._file.interp_file import getopenstreams, StreamErrors diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -140,6 +140,11 @@ raise OperationError(space.w_SystemError, space.wrap("not supported by libffi")) +def wrap_dlopenerror(space, e, filename): + msg = e.msg if e.msg else 'unspecified error' + return operationerrfmt(space.w_OSError, 'Cannot load library %s: %s', + filename, msg) + class W_CDLL(W_Root): def __init__(self, space, name, cdll): @@ -219,8 +224,7 @@ try: cdll = CDLL(name) except DLOpenError, e: - raise operationerrfmt(space.w_OSError, '%s: %s', name, - e.msg or 'unspecified error') + raise wrap_dlopenerror(space, e, name) except OSError, e: raise wrap_oserror(space, e) return space.wrap(W_CDLL(space, name, cdll)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -223,7 +223,8 @@ _rawffi.CDLL("xxxxx_this_name_does_not_exist_xxxxx") except OSError, e: print e - assert str(e).startswith("xxxxx_this_name_does_not_exist_xxxxx: ") + assert str(e).startswith( + "Cannot load library xxxxx_this_name_does_not_exist_xxxxx: ") else: raise AssertionError("did not fail??") diff --git a/pypy/module/clr/__init__.py b/pypy/module/clr/__init__.py deleted file mode 100644 --- a/pypy/module/clr/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Package initialisation -from pypy.interpreter.mixedmodule import MixedModule - -import boxing_rules # with side effects - -class Module(MixedModule): - """CLR module""" - - appleveldefs = { - 'dotnetimporter': 'app_importer.importer' - } - - interpleveldefs = { - '_CliObject_internal': 'interp_clr.W_CliObject', - 'call_staticmethod': 'interp_clr.call_staticmethod', - 'load_cli_class': 'interp_clr.load_cli_class', - 'get_assemblies_info': 'interp_clr.get_assemblies_info', - 'AddReferenceByPartialName': 'interp_clr.AddReferenceByPartialName', - } - - def startup(self, space): - self.space.appexec([self], """(clr_module): - import sys - clr_module.get_assemblies_info() # load info for std assemblies - sys.meta_path.append(clr_module.dotnetimporter()) - """) diff --git a/pypy/module/clr/app_clr.py b/pypy/module/clr/app_clr.py deleted file mode 100644 --- a/pypy/module/clr/app_clr.py +++ /dev/null @@ -1,204 +0,0 @@ -# NOT_RPYTHON - -class StaticMethodWrapper(object): - __slots__ = ('class_name', 'meth_name',) - - def __init__(self, class_name, meth_name): - self.class_name = class_name - self.meth_name = meth_name - - def __call__(self, *args): - import clr - return clr.call_staticmethod(self.class_name, self.meth_name, args) - - def __repr__(self): - return '' % (self.class_name, self.meth_name) - - -class MethodWrapper(object): - __slots__ = ('meth_name',) - - def __init__(self, meth_name): - self.meth_name = meth_name - - def __get__(self, obj, type_): - if obj is None: - return UnboundMethod(type_, self.meth_name) - else: - return BoundMethod(self.meth_name, obj) - - def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, repr(self.meth_name)) - - -class UnboundMethod(object): - __slots__ = ('im_class', 'im_name') - - def __init__(self, im_class, im_name): - self.im_class = im_class - self.im_name = im_name - - def __raise_TypeError(self, thing): - raise TypeError, 'unbound method %s() must be called with %s ' \ - 'instance as first argument (got %s instead)' % \ - 
(self.im_name, self.im_class.__cliclass__, thing) - - def __call__(self, *args): - if len(args) == 0: - self.__raise_TypeError('nothing') - im_self = args[0] - if not isinstance(im_self, self.im_class): - self.__raise_TypeError('%s instance' % im_self.__class__.__name__) - return im_self.__cliobj__.call_method(self.im_name, args, 1) # ignore the first arg - - def __repr__(self): - return '' % (self.im_class.__cliclass__, self.im_name) - - -class BoundMethod(object): - __slots__ = ('im_name', 'im_self') - - def __init__(self, im_name, im_self): - self.im_name = im_name - self.im_self = im_self - - def __call__(self, *args): - return self.im_self.__cliobj__.call_method(self.im_name, args) - - def __repr__(self): - return '' % (self.im_self.__class__.__cliclass__, - self.im_name, - self.im_self) - -class StaticProperty(object): - def __init__(self, fget=None, fset=None): - self.fget = fget - self.fset = fset - - def __get__(self, obj, type_): - return self.fget() - -def _qualify(t): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - return '%s, %s' % (t, mscorlib) - -class MetaGenericCliClassWrapper(type): - _cli_types = { - int: _qualify('System.Int32'), - str: _qualify('System.String'), - bool: _qualify('System.Boolean'), - float: _qualify('System.Double'), - } - _System_Object = _qualify('System.Object') - - def _cli_name(cls, ttype): - if isinstance(ttype, MetaCliClassWrapper): - return '[%s]' % ttype.__fullyqualifiedname__ - else: - return '[%s]' % cls._cli_types.get(ttype, cls._System_Object) - - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - - def __getitem__(cls, type_or_tuple): - import clr - if isinstance(type_or_tuple, tuple): - types = type_or_tuple - else: - types = (type_or_tuple,) - namespace, generic_class = cls.__cliclass__.rsplit('.', 1) - generic_params = [cls._cli_name(t) for t in types] - instance_class = '%s[%s]' % (generic_class, ','.join(generic_params)) - try: - return clr.load_cli_class(cls.__assemblyname__, namespace, instance_class) - except ImportError: - raise TypeError, "Cannot load type %s.%s" % (namespace, instance_class) - -class MetaCliClassWrapper(type): - def __setattr__(cls, name, value): - obj = cls.__dict__.get(name, None) - if isinstance(obj, StaticProperty): - obj.fset(value) - else: - type.__setattr__(cls, name, value) - -class CliClassWrapper(object): - __slots__ = ('__cliobj__',) - - def __init__(self, *args): - import clr - self.__cliobj__ = clr._CliObject_internal(self.__fullyqualifiedname__, args) - - -class IEnumeratorWrapper(object): - def __init__(self, enumerator): - self.__enumerator__ = enumerator - - def __iter__(self): - return self - - def next(self): - if not self.__enumerator__.MoveNext(): - raise StopIteration - return self.__enumerator__.Current - -# this method need to be attached only to classes that implements IEnumerable (see build_wrapper) -def __iter__(self): - return IEnumeratorWrapper(self.GetEnumerator()) - -def wrapper_from_cliobj(cls, cliobj): - obj = cls.__new__(cls) - obj.__cliobj__ = cliobj - return obj - -def build_wrapper(namespace, classname, assemblyname, - staticmethods, methods, properties, indexers, - hasIEnumerable, isClassGeneric): - fullname = '%s.%s' % (namespace, classname) - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - d = {'__cliclass__': fullname, - '__fullyqualifiedname__': assembly_qualified_name, - 
'__assemblyname__': assemblyname, - '__module__': namespace} - for name in staticmethods: - d[name] = StaticMethodWrapper(assembly_qualified_name, name) - for name in methods: - d[name] = MethodWrapper(name) - - # check if IEnumerable is implemented - if hasIEnumerable: - d['__iter__'] = __iter__ - - assert len(indexers) <= 1 - if indexers: - name, getter, setter, is_static = indexers[0] - assert not is_static - if getter: - d['__getitem__'] = d[getter] - if setter: - d['__setitem__'] = d[setter] - if isClassGeneric: - cls = MetaGenericCliClassWrapper(classname, (CliClassWrapper,), d) - else: - cls = MetaCliClassWrapper(classname, (CliClassWrapper,), d) - - # we must add properties *after* the class has been created - # because we need to store UnboundMethods as getters and setters - for (name, getter, setter, is_static) in properties: - fget = None - fset = None - if getter: - fget = getattr(cls, getter) - if setter: - fset = getattr(cls, setter) - if is_static: - prop = StaticProperty(fget, fset) - else: - prop = property(fget, fset) - setattr(cls, name, prop) - - return cls diff --git a/pypy/module/clr/app_importer.py b/pypy/module/clr/app_importer.py deleted file mode 100644 --- a/pypy/module/clr/app_importer.py +++ /dev/null @@ -1,85 +0,0 @@ -"""NOT_RPYTHON""" - -# Meta hooks are called at the start of Import Processing -# Meta hooks can override the sys.path, frozen modules , built-in modules -# To register a Meta Hook simply add importer object to sys.meta_path - -import sys -import types - -class importer(object): - ''' - If the importer is installed on sys.meta_path, it will - receive a second argument, which is None for a top-level module, or - package.__path__ for submodules or subpackages - - It should return a loader object if the module was found, or None if it wasn\'t. - If find_module() raises an exception, the caller will abort the import. - When importer.find_module("spam.eggs.ham") is called, "spam.eggs" has already - been imported and added to sys.modules. - ''' - - def find_module(self, fullname, path=None): - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in namespaces or fullname in classes: - return self # fullname is a .NET Module - else: - return None # fullname is not a .NET Module - - def load_module(self, fullname): - ''' - The load_module() must fulfill the following *before* it runs any code: - Note that the module object *must* be in sys.modules before the - loader executes the module code. - - A If 'fullname' exists in sys.modules, the loader must use that - else the loader must create a new module object and add it to sys.modules. - - module = sys.modules.setdefault(fullname, new.module(fullname)) - - B The __file__ attribute must be set. String say "" - - C The __name__ attribute must be set. If one uses - imp.new_module() then the attribute is set automatically. - - D If it\'s a package, the __path__ variable must be set. This must - be a list, but may be empty if __path__ has no further - significance to the importer (more on this later). - - E It should add a __loader__ attribute to the module, set to the loader object. 
- - ''' - # If it is a call for a Class then return with the Class reference - import clr - namespaces, classes, generics = clr.get_assemblies_info() - - if fullname in classes: - assemblyname = classes[fullname] - fullname = generics.get(fullname, fullname) - ns, classname = fullname.rsplit('.', 1) - sys.modules[fullname] = clr.load_cli_class(assemblyname, ns, classname) - else: # if not a call for actual class (say for namespaces) assign an empty module - if fullname not in sys.modules: - mod = CLRModule(fullname) - mod.__file__ = "<%s>" % self.__class__.__name__ - mod.__loader__ = self - mod.__name__ = fullname - # add it to the modules dict - sys.modules[fullname] = mod - - # if it is a PACKAGE then we are to initialize the __path__ for the module - # we won't deal with Packages here - return sys.modules[fullname] - -class CLRModule(types.ModuleType): - def __getattr__(self, name): - if not name.startswith("__"): - try: - iname = self.__name__ + '.' + name - __import__(iname) - except ImportError: - pass - return types.ModuleType.__getattribute__(self, name) - diff --git a/pypy/module/clr/assemblyname.py b/pypy/module/clr/assemblyname.py deleted file mode 100644 --- a/pypy/module/clr/assemblyname.py +++ /dev/null @@ -1,2 +0,0 @@ -mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' -System = 'System, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' diff --git a/pypy/module/clr/boxing_rules.py b/pypy/module/clr/boxing_rules.py deleted file mode 100644 --- a/pypy/module/clr/boxing_rules.py +++ /dev/null @@ -1,53 +0,0 @@ -from rpython.tool.pairtype import extendabletype -from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.boolobject import W_BoolObject -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.stringobject import W_StringObject -from rpython.translator.cli.dotnet import box - -class __extend__(W_Root): - __metaclass__ = extendabletype - - def tocli(self): - return box(self) - -class __extend__(W_IntObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.intval) - -class __extend__(W_FloatObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.floatval) - -class __extend__(W_NoneObject): - __metaclass__ = extendabletype - - def tocli(self): - return None - -class __extend__(W_BoolObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self.boolval) - -class __extend__(W_StringObject): - __metaclass__ = extendabletype - - def tocli(self): - return box(self._value) - -##from pypy.objspace.fake.objspace import W_Object as W_Object_Fake -##from rpython.rlib.nonconst import NonConstant - -##class __extend__(W_Object_Fake): -## __metaclass__ = extendabletype - -## def tocli(self): -## return NonConstant(None) diff --git a/pypy/module/clr/interp_clr.py b/pypy/module/clr/interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/interp_clr.py +++ /dev/null @@ -1,364 +0,0 @@ -import os.path -from pypy.module.clr import assemblyname -from pypy.interpreter.baseobjspace import W_Root, W_Root -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, ApplevelClass -from pypy.interpreter.typedef import TypeDef -from rpython.rtyper.ootypesystem import ootype -from rpython.translator.cli.dotnet import CLR, box, unbox, NativeException, 
native_exc,\ - new_array, init_array, typeof - -System = CLR.System -Assembly = CLR.System.Reflection.Assembly -TargetInvocationException = NativeException(CLR.System.Reflection.TargetInvocationException) -AmbiguousMatchException = NativeException(CLR.System.Reflection.AmbiguousMatchException) - -def get_method(space, b_type, name, b_paramtypes): - try: - method = b_type.GetMethod(name, b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - if method is None: - msg = 'No overloads for %s could match' - raise operationerrfmt(space.w_TypeError, msg, name) - return method - -def get_constructor(space, b_type, b_paramtypes): - try: - ctor = b_type.GetConstructor(b_paramtypes) - except AmbiguousMatchException: - msg = 'Multiple constructors could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - if ctor is None: - msg = 'No overloads for constructor could match' - raise OperationError(space.w_TypeError, space.wrap(msg)) - return ctor - -def rewrap_args(space, w_args, startfrom): - args = space.unpackiterable(w_args) - paramlen = len(args)-startfrom - b_args = new_array(System.Object, paramlen) - b_paramtypes = new_array(System.Type, paramlen) - for i in range(startfrom, len(args)): - j = i-startfrom - b_obj = py2cli(space, args[i]) - b_args[j] = b_obj - if b_obj is None: - b_paramtypes[j] = typeof(System.Object) # we really can't be more precise - else: - b_paramtypes[j] = b_obj.GetType() # XXX: potentially inefficient - return b_args, b_paramtypes - - -def call_method(space, b_obj, b_type, name, w_args, startfrom): - b_args, b_paramtypes = rewrap_args(space, w_args, startfrom) - b_meth = get_method(space, b_type, name, b_paramtypes) - try: - # for an explanation of the box() call, see the log message for revision 35167 - b_res = box(b_meth.Invoke(b_obj, b_args)) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - if b_meth.get_ReturnType().get_Name() == 'Void': - return space.w_None - else: - return cli2py(space, b_res) - - at unwrap_spec(typename=str, methname=str) -def call_staticmethod(space, typename, methname, w_args): - """ - Call a .NET static method. - - Parameters: - - - typename: the fully qualified .NET name of the class - containing the method (e.g. ``System.Math``) - - - methname: the name of the static method to call (e.g. ``Abs``) - - - args: a list containing the arguments to be passed to the - method. - """ - b_type = System.Type.GetType(typename) # XXX: cache this! - return call_method(space, None, b_type, methname, w_args, 0) - -def py2cli(space, w_obj): - try: - cliobj = space.getattr(w_obj, space.wrap('__cliobj__')) - except OperationError, e: - if e.match(space, space.w_AttributeError): - # it hasn't got a __cloobj__ - return w_obj.tocli() - else: - raise - else: - if isinstance(cliobj, W_CliObject): - return cliobj.b_obj # unwrap it! - else: - # this shouldn't happen! Fallback to the default impl - return w_obj.tocli() - -def cli2py(space, b_obj): - # TODO: support other types and find the most efficient way to - # select the correct case - if b_obj is None: - return space.w_None - - w_obj = unbox(b_obj, W_Root) - if w_obj is not None: - return w_obj # it's already a wrapped object! 
- - b_type = b_obj.GetType() - if b_type == typeof(System.Int32): - intval = unbox(b_obj, ootype.Signed) - return space.wrap(intval) - elif b_type == typeof(System.Double): - floatval = unbox(b_obj, ootype.Float) - return space.wrap(floatval) - elif b_type == typeof(System.Boolean): - boolval = unbox(b_obj, ootype.Bool) - return space.wrap(boolval) - elif b_type == typeof(System.String): - strval = unbox(b_obj, ootype.String) - return space.wrap(strval) - else: - namespace, classname = split_fullname(b_type.ToString()) - assemblyname = b_type.get_Assembly().get_FullName() - w_cls = load_cli_class(space, assemblyname, namespace, classname) - cliobj = W_CliObject(space, b_obj) - return wrapper_from_cliobj(space, w_cls, cliobj) - -def split_fullname(name): - lastdot = name.rfind('.') - if lastdot < 0: - return '', name - return name[:lastdot], name[lastdot+1:] - -def wrap_list_of_tuples(space, lst): - list_w = [] - for (a,b,c,d) in lst: - items_w = [space.wrap(a), space.wrap(b), space.wrap(c), space.wrap(d)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_pairs(space, lst): - list_w = [] - for (a,b) in lst: - items_w = [space.wrap(a), space.wrap(b)] - list_w.append(space.newtuple(items_w)) - return space.newlist(list_w) - -def wrap_list_of_strings(space, lst): - list_w = [space.wrap(s) for s in lst] - return space.newlist(list_w) - -def get_methods(space, b_type): - methods = [] - staticmethods = [] - b_methodinfos = b_type.GetMethods() - for i in range(len(b_methodinfos)): - b_meth = b_methodinfos[i] - if b_meth.get_IsPublic(): - if b_meth.get_IsStatic(): - staticmethods.append(str(b_meth.get_Name())) - else: - methods.append(str(b_meth.get_Name())) - w_staticmethods = wrap_list_of_strings(space, staticmethods) - w_methods = wrap_list_of_strings(space, methods) - return w_staticmethods, w_methods - -def get_properties(space, b_type): - properties = [] - indexers = {} - b_propertyinfos = b_type.GetProperties() - for i in range(len(b_propertyinfos)): - b_prop = b_propertyinfos[i] - get_name = None - set_name = None - is_static = False - if b_prop.get_CanRead(): - get_meth = b_prop.GetGetMethod() - get_name = get_meth.get_Name() - is_static = get_meth.get_IsStatic() - if b_prop.get_CanWrite(): - set_meth = b_prop.GetSetMethod() - if set_meth: - set_name = set_meth.get_Name() - is_static = set_meth.get_IsStatic() - b_indexparams = b_prop.GetIndexParameters() - if len(b_indexparams) == 0: - properties.append((b_prop.get_Name(), get_name, set_name, is_static)) - else: - indexers[b_prop.get_Name(), get_name, set_name, is_static] = None - w_properties = wrap_list_of_tuples(space, properties) - w_indexers = wrap_list_of_tuples(space, indexers.keys()) - return w_properties, w_indexers - -class _CliClassCache: - def __init__(self): - self.cache = {} - - def put(self, fullname, cls): - assert fullname not in self.cache - self.cache[fullname] = cls - - def get(self, fullname): - return self.cache.get(fullname, None) -CliClassCache = _CliClassCache() - -class _AssembliesInfo: - w_namespaces = None - w_classes = None - w_generics = None - w_info = None # a tuple containing (w_namespaces, w_classes, w_generics) -AssembliesInfo = _AssembliesInfo() - -def save_info_for_assembly(space, b_assembly): - info = AssembliesInfo - b_types = b_assembly.GetTypes() - w_assemblyName = space.wrap(b_assembly.get_FullName()) - for i in range(len(b_types)): - b_type = b_types[i] - namespace = b_type.get_Namespace() - fullname = b_type.get_FullName() - if '+' in fullname: - # it's 
an internal type, skip it - continue - if namespace is not None: - # builds all possible sub-namespaces - # (e.g. 'System', 'System.Windows', 'System.Windows.Forms') - chunks = namespace.split(".") - temp_name = chunks[0] - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - for chunk in chunks[1:]: - temp_name += "."+chunk - space.setitem(info.w_namespaces, space.wrap(temp_name), space.w_None) - if b_type.get_IsGenericType(): - index = fullname.rfind("`") - assert index >= 0 - pyName = fullname[0:index] - space.setitem(info.w_classes, space.wrap(pyName), w_assemblyName) - space.setitem(info.w_generics, space.wrap(pyName), space.wrap(fullname)) - else: - space.setitem(info.w_classes, space.wrap(fullname), w_assemblyName) - - -def save_info_for_std_assemblies(space): - # in theory we should use Assembly.Load, but it doesn't work with - # pythonnet because it thinks it should use the Load(byte[]) overload - b_mscorlib = Assembly.LoadWithPartialName(assemblyname.mscorlib) - b_System = Assembly.LoadWithPartialName(assemblyname.System) - save_info_for_assembly(space, b_mscorlib) - save_info_for_assembly(space, b_System) - -def get_assemblies_info(space): - info = AssembliesInfo - if info.w_info is None: - info.w_namespaces = space.newdict() - info.w_classes = space.newdict() - info.w_generics = space.newdict() - info.w_info = space.newtuple([info.w_namespaces, info.w_classes, info.w_generics]) - save_info_for_std_assemblies(space) - return info.w_info - -#_______________________________________________________________________________ -# AddReference* methods - -# AddReference', 'AddReferenceByName', 'AddReferenceByPartialName', 'AddReferenceToFile', 'AddReferenceToFileAndPath' - - at unwrap_spec(name=str) -def AddReferenceByPartialName(space, name): - b_assembly = Assembly.LoadWithPartialName(name) - if b_assembly is not None: - save_info_for_assembly(space, b_assembly) - - - at unwrap_spec(assemblyname=str, namespace=str, classname=str) -def load_cli_class(space, assemblyname, namespace, classname): - """ - Load the given .NET class into the PyPy interpreter and return a - Python class referencing to it. - - Parameters: - - - namespace: the full name of the namespace containing the - class (e.g., ``System.Collections``). - - - classname: the name of the class in the specified namespace - (e.g. ``ArrayList``). 
""" - fullname = '%s.%s' % (namespace, classname) - w_cls = CliClassCache.get(fullname) - if w_cls is None: - w_cls = build_cli_class(space, namespace, classname, fullname, assemblyname) - CliClassCache.put(fullname, w_cls) - return w_cls - -def build_cli_class(space, namespace, classname, fullname, assemblyname): - assembly_qualified_name = '%s, %s' % (fullname, assemblyname) - b_type = System.Type.GetType(assembly_qualified_name) - if b_type is None: - raise operationerrfmt(space.w_ImportError, - "Cannot load .NET type: %s", fullname) - - # this is where we locate the interfaces inherited by the class - # set the flag hasIEnumerable if IEnumerable interface has been by the class - hasIEnumerable = b_type.GetInterface("System.Collections.IEnumerable") is not None - - # this is where we test if the class is Generic - # set the flag isClassGeneric - isClassGeneric = False - if b_type.get_IsGenericType(): - isClassGeneric = True - - w_staticmethods, w_methods = get_methods(space, b_type) - w_properties, w_indexers = get_properties(space, b_type) - return build_wrapper(space, - space.wrap(namespace), - space.wrap(classname), - space.wrap(assemblyname), - w_staticmethods, - w_methods, - w_properties, - w_indexers, - space.wrap(hasIEnumerable), - space.wrap(isClassGeneric)) - - -class W_CliObject(W_Root): - def __init__(self, space, b_obj): - self.space = space - self.b_obj = b_obj - - @unwrap_spec(name=str, startfrom=int) - def call_method(self, name, w_args, startfrom=0): - return call_method(self.space, self.b_obj, self.b_obj.GetType(), name, w_args, startfrom) - - at unwrap_spec(typename=str) -def cli_object_new(space, w_subtype, typename, w_args): - b_type = System.Type.GetType(typename) - b_args, b_paramtypes = rewrap_args(space, w_args, 0) - b_ctor = get_constructor(space, b_type, b_paramtypes) - try: - b_obj = b_ctor.Invoke(b_args) - except TargetInvocationException, e: - b_inner = native_exc(e).get_InnerException() - message = str(b_inner.get_Message()) - # TODO: use the appropriate exception, not StandardError - raise OperationError(space.w_StandardError, space.wrap(message)) - return space.wrap(W_CliObject(space, b_obj)) - -W_CliObject.typedef = TypeDef( - '_CliObject_internal', - __new__ = interp2app(cli_object_new), - call_method = interp2app(W_CliObject.call_method), - ) - -path, _ = os.path.split(__file__) -app_clr = os.path.join(path, 'app_clr.py') -app = ApplevelClass(file(app_clr).read()) -del path, app_clr -build_wrapper = app.interphook("build_wrapper") -wrapper_from_cliobj = app.interphook("wrapper_from_cliobj") diff --git a/pypy/module/clr/test/__init__.py b/pypy/module/clr/test/__init__.py deleted file mode 100644 --- a/pypy/module/clr/test/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# diff --git a/pypy/module/clr/test/test_clr.py b/pypy/module/clr/test/test_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_clr.py +++ /dev/null @@ -1,292 +0,0 @@ -from pypy.module.clr.assemblyname import mscorlib - -def skip_if_not_pythonnet(): - import py - try: - import clr - except ImportError: - py.test.skip('Must use pythonnet to access .NET libraries') - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def setup_class(cls): - cls.w_mscorlib = cls.space.wrap(mscorlib) - - def test_cliobject(self): - import clr - obj = clr._CliObject_internal('System.Collections.ArrayList', []) - max_index = obj.call_method('Add', [42]) - assert max_index == 0 - - def test_cache(self): - import clr - ArrayList = 
clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - ArrayList2 = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is ArrayList2 - - def test_load_fail(self): - import clr - raises(ImportError, clr.load_cli_class, self.mscorlib, 'Foo', 'Bar') - - def test_ArrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - obj.Add(43) - total = obj.get_Item(0) + obj.get_Item(1) - assert total == 42+43 - - def test_ArrayList_error(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - raises(StandardError, obj.get_Item, 0) - - def test_float_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42.0) - item = obj.get_Item(0) - assert isinstance(item, float) - - def test_bool_conversion(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(True) - obj.Add(False) - t = obj.get_Item(0) - f = obj.get_Item(1) - assert t and isinstance(t, bool) - assert not f and isinstance(f, bool) - obj.Add(42) - assert obj.Contains(42) - - def test_getitem(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj[0] == 42 - - def test_property(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - obj.Add(42) - assert obj.Count == 1 - obj.Capacity = 10 - assert obj.Capacity == 10 - - def test_unboundmethod(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - ArrayList.Add(obj, 42) - assert obj.get_Item(0) == 42 - - def test_unboundmethod_typeerror(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - raises(TypeError, ArrayList.Add) - raises(TypeError, ArrayList.Add, 0) - - def test_overload(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList() - for i in range(10): - obj.Add(i) - assert obj.IndexOf(7) == 7 - assert obj.IndexOf(7, 0, 5) == -1 - - def test_wrong_overload(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - raises(TypeError, Math.Abs, "foo") - - def test_wrong_overload_ctor(self): - from System.Collections import ArrayList - raises(TypeError, ArrayList, "foo") - - def test_staticmethod(self): - import clr - Math = clr.load_cli_class(self.mscorlib, 'System', 'Math') - res = Math.Abs(-42) - assert res == 42 - assert type(res) is int - res = Math.Abs(-42.0) - assert res == 42.0 - assert type(res) is float - - def test_constructor_args(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - obj = ArrayList(42) - assert obj.Capacity == 42 - - def test_None_as_null(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - Hashtable = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Hashtable') - x = ArrayList() - x.Add(None) - assert x[0] is None - y = Hashtable() - assert y["foo"] is None - - def test_pass_opaque_arguments(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - class Foo: - pass - obj = 
Foo() - x = ArrayList() - x.Add(obj) - obj2 = x[0] - assert obj is obj2 - - def test_string_wrapping(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add("bar") - s = x[0] - assert s == "bar" - - def test_static_property(self): - import clr - import os - Environment = clr.load_cli_class(self.mscorlib, 'System', 'Environment') - assert Environment.CurrentDirectory == os.getcwd() - Environment.CurrentDirectory == '/' - assert Environment.CurrentDirectory == os.getcwd() - - def test_GetEnumerator(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - enum = x.GetEnumerator() - assert enum.MoveNext() is False - - def test_iteration_arrayList(self): - import clr - ArrayList = clr.load_cli_class(self.mscorlib, 'System.Collections', 'ArrayList') - x = ArrayList() - x.Add(1) - x.Add(2) - x.Add(3) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 1+2+3+4 - - def test_iteration_stack(self): - import clr - Stack = clr.load_cli_class(self.mscorlib, 'System.Collections', 'Stack') - obj = Stack() - obj.Push(1) - obj.Push(54) - obj.Push(21) - sum = 0 - for i in obj: - sum += i - assert sum == 1+54+21 - - def test_load_generic_class(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - x.Add(42) - x.Add(4) - x.Add(4) - sum = 0 - for i in x: - sum += i - assert sum == 42+4+4 - - def test_generic_class_typeerror(self): - import clr - ListInt = clr.load_cli_class(self.mscorlib, "System.Collections.Generic", "List`1[System.Int32]") - x = ListInt() - raises(TypeError, x.Add, "test") - - def test_generic_dict(self): - import clr - genDictIntStr = clr.load_cli_class(self.mscorlib, - "System.Collections.Generic", - "Dictionary`2[System.Int32,System.String]") - x = genDictIntStr() - x[1] = "test" - x[2] = "rest" - assert x[1] == "test" - assert x[2] == "rest" - raises(TypeError, x.__setitem__, 3, 3) - raises(TypeError, x.__setitem__, 4, 4.453) - raises(TypeError, x.__setitem__, "test", 3) - - def test_generic_metaclass_list(self): - import clr - from System.Collections.Generic import List - import System.Int32 - lst = List[System.Int32]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - lst = List[int]() - lst.Add(42) - assert lst[0] == 42 - raises(TypeError, lst.Add, "test") - - def test_generic_metaclass_dict(self): - import clr - from System.Collections.Generic import Dictionary - import System.Int32 - import System.String - d1 = Dictionary[System.Int32, System.String]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - d1 = Dictionary[int, str]() - d1[42]="test" - assert d1[42] == "test" - raises(TypeError, d1.__setitem__, 42, 42) - - def test_generic_metaclass_object(self): - import clr - from System.Collections.Generic import List - class Foo(object): - pass - lst = List[Foo]() - f = Foo() - lst.Add(f) - assert lst[0] is f - - def test_generic_metaclass_typeerror(self): - import clr - from System.Collections.Generic import List - raises(TypeError, "List[int, int]") - - def test_py2cli_cliobjects(self): - from System.IO import StreamReader, MemoryStream - mem = MemoryStream(100) - sr = StreamReader(mem) # does not raise - - def test_external_assemblies(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - from System.IO import StringReader - from System.Xml import XmlReader - buffer = 
StringReader("test") - xml = XmlReader.Create(buffer) - xml.ReadStartElement("foo") - assert xml.ReadString() == 'test' - xml.ReadEndElement() diff --git a/pypy/module/clr/test/test_importer.py b/pypy/module/clr/test/test_importer.py deleted file mode 100644 --- a/pypy/module/clr/test/test_importer.py +++ /dev/null @@ -1,76 +0,0 @@ -from pypy.module.clr.test.test_clr import skip_if_not_pythonnet - -skip_if_not_pythonnet() - -class AppTestDotnet: - spaceconfig = dict(usemodules=('clr',)) - - def test_list_of_namespaces_and_classes(self): - import clr - ns, classes, generics = clr.get_assemblies_info() - - assert 'System' in ns - assert 'System.Collections' in ns - assert 'System.Runtime' in ns - assert 'System.Runtime.InteropServices' in ns - - assert 'System' not in classes - assert 'System.Math' in classes - assert 'System.Collections.ArrayList' in classes - - assert 'System.Collections.Generic.List' in classes - assert generics['System.Collections.Generic.List'] == 'System.Collections.Generic.List`1' - - def test_import_hook_simple(self): - mscorlib = 'mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089' - import clr - import System.Math - - assert System.Math.Abs(-5) == 5 - assert System.Math.Pow(2, 5) == 2**5 - - Math = clr.load_cli_class(mscorlib, 'System', 'Math') - assert Math is System.Math - - import System - a = System.Collections.Stack() - a.Push(3) - a.Push(44) - sum = 0 - for i in a: - sum += i - assert sum == 3+44 - - import System.Collections.ArrayList - ArrayList = clr.load_cli_class(mscorlib, 'System.Collections', 'ArrayList') - assert ArrayList is System.Collections.ArrayList - - def test_ImportError(self): - def fn(): - import non_existent_module - raises(ImportError, fn) - - def test_import_twice(self): - import System - s1 = System - import System - assert s1 is System - - def test_lazy_import(self): - import System - System.Runtime.InteropServices # does not raise attribute error - - def test_generic_class_import(self): - import System.Collections.Generic.List - - def test_import_from(self): - from System.Collections import ArrayList - - def test_AddReferenceByPartialName(self): - import clr - clr.AddReferenceByPartialName('System.Xml') - import System.Xml.XmlReader # does not raise - - def test_AddReference_early(self): - import clr - clr.AddReferenceByPartialName('System.Xml') diff --git a/pypy/module/clr/test/test_interp_clr.py b/pypy/module/clr/test/test_interp_clr.py deleted file mode 100644 --- a/pypy/module/clr/test/test_interp_clr.py +++ /dev/null @@ -1,10 +0,0 @@ -from pypy.module.clr.interp_clr import split_fullname - -def test_split_fullname(): - split = split_fullname - assert split('Foo') == ('', 'Foo') - assert split('System.Foo') == ('System', 'Foo') - assert split('System.Foo.Bar') == ('System.Foo', 'Bar') - assert split('System.Foo.A+B') == ('System.Foo', 'A+B') - assert split('System.') == ('System', '') - diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -229,7 +229,7 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(orig_arr) + return chunks.apply(space, orig_arr) def descr_setitem(self, space, orig_arr, w_index, w_value): try: @@ -238,7 +238,7 @@ except IndexError: w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(orig_arr) + view = 
chunks.apply(space, orig_arr) view.implementation.setslice(space, w_value) def transpose(self, orig_array): @@ -269,14 +269,14 @@ shape, skip) return iter.MultiDimViewIterator(self, self.dtype, self.start, r[0], r[1], shape) - def swapaxes(self, orig_arr, axis1, axis2): + def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] backstrides = self.get_backstrides()[:] shape[axis1], shape[axis2] = shape[axis2], shape[axis1] strides[axis1], strides[axis2] = strides[axis2], strides[axis1] backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(self.start, strides, + return W_NDimArray.new_slice(space, self.start, strides, backstrides, shape, self, orig_arr) def get_storage_as_int(self, space): @@ -289,13 +289,16 @@ return ArrayBuffer(self) def astype(self, space, dtype): - new_arr = W_NDimArray.from_shape(self.get_shape(), dtype) + strides, backstrides = support.calc_strides(self.get_shape(), dtype, + self.order) + impl = ConcreteArray(self.get_shape(), dtype, self.order, + strides, backstrides) if self.dtype.is_str_or_unicode() and not dtype.is_str_or_unicode(): raise OperationError(space.w_NotImplementedError, space.wrap( "astype(%s) not implemented yet" % self.dtype)) else: - loop.setslice(space, new_arr.get_shape(), new_arr.implementation, self) - return new_arr + loop.setslice(space, impl.get_shape(), impl, self) + return impl class ConcreteArrayNotOwning(BaseConcreteArray): diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -139,7 +139,7 @@ if not new_shape: return self if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(new_shape, self.dtype) + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) arr_iter = arr.create_iter(new_shape) arr_iter.setitem(self.value) return arr.implementation @@ -152,7 +152,7 @@ def create_axis_iter(self, shape, dim, cum): raise Exception("axis iter should not happen on scalar") - def swapaxes(self, orig_array, axis1, axis2): + def swapaxes(self, space, orig_array, axis1, axis2): raise Exception("should not be called") def fill(self, w_value): @@ -166,7 +166,7 @@ return space.wrap(0) def astype(self, space, dtype): - return W_NDimArray.new_scalar(space, dtype, self.value) + raise Exception("should not be called") def base(self): return None diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -126,7 +126,7 @@ axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(arr.get_shape(), dtype) + index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -10,6 +10,15 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) +def wrap_impl(space, w_cls, w_instance, impl): + if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): + w_ret = W_NDimArray(impl) + else: + w_ret = space.allocate_instance(W_NDimArray, w_cls) + W_NDimArray.__init__(w_ret, impl) + assert 
isinstance(w_ret, W_NDimArray) + space.call_method(w_ret, '__array_finalize__', w_instance) + return w_ret class ArrayArgumentException(Exception): pass @@ -20,10 +29,11 @@ def __init__(self, implementation): assert isinstance(implementation, BaseArrayImplementation) + assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod - def from_shape(shape, dtype, order='C'): + def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: @@ -32,10 +42,12 @@ strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) + if w_instance: + return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @staticmethod - def from_shape_and_storage(shape, storage, dtype, order='C', owning=False): + def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None): from pypy.module.micronumpy.arrayimpl import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) @@ -46,15 +58,20 @@ else: impl = concrete.ConcreteArrayNotOwning(shape, dtype, order, strides, backstrides, storage) + if w_subtype: + w_ret = space.allocate_instance(W_NDimArray, w_subtype) + W_NDimArray.__init__(w_ret, impl) + space.call_method(w_ret, '__array_finalize__', w_subtype) + return w_ret return W_NDimArray(impl) @staticmethod - def new_slice(offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): from pypy.module.micronumpy.arrayimpl import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) - return W_NDimArray(impl) + return wrap_impl(space, space.type(orig_arr), orig_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -35,10 +35,12 @@ class BadToken(Exception): pass + SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring","count_nonzero", "argsort"] TWO_ARG_FUNCTIONS = ["dot", 'take'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -184,14 +186,23 @@ def is_true(self, w_obj): assert isinstance(w_obj, BoolObject) - return False - #return w_obj.boolval + return w_obj.boolval def is_w(self, w_obj, w_what): return w_obj is w_what + def issubtype(self, w_type1, w_type2): + return BoolObject(True) + def type(self, w_obj): - return w_obj.tp From noreply at buildbot.pypy.org Sun Jul 28 13:43:29 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sun, 28 Jul 2013 13:43:29 +0200 (CEST) Subject: [pypy-commit] buildbot default: add an jit/app-level builder triggerd by the arm/raring build Message-ID: <20130728114329.378E21C00D8@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r831:4719464ec096 Date: 2013-07-28 13:42 +0200 http://bitbucket.org/pypy/buildbot/changeset/4719464ec096/ Log: add an jit/app-level builder triggerd by the arm/raring build diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -56,7 +56,8 @@ + crosstranslationjitargs), platform='linux-armhf-raring', interpreter='pypy', - prefix=['schroot', '-c', 'raring']) + 
prefix=['schroot', '-c', 'raring'], + trigger='JITLINUXARMHF_RARING_scheduler') pypyARMJITTranslatedTestFactory = pypybuilds.TranslatedTests( translationArgs=(crosstranslationargs @@ -89,6 +90,15 @@ app_tests=True, platform='linux-armhf-raspbian', ) +pypyARMHF_RARING_JITTranslatedTestFactory = pypybuilds.TranslatedTests( + translationArgs=(crosstranslationargs + + jit_translation_args + + crosstranslationjitargs), + lib_python=True, + pypyjit=True, + app_tests=True, + platform='linux-armhf-raring', + ) # APPLVLLINUXARM = "pypy-c-app-level-linux-armel" APPLVLLINUXARMHF_v7 = "pypy-c-app-level-linux-armhf-v7" @@ -97,6 +107,7 @@ JITLINUXARM = "pypy-c-jit-linux-armel" JITLINUXARMHF_v7 = "pypy-c-jit-linux-armhf-v7" JITLINUXARMHF_RASPBIAN = "pypy-c-jit-linux-armhf-raspbian" +JITLINUXARMHF_RARING = "pypy-c-jit-linux-armhf-raring" JITBACKENDONLYLINUXARMEL = "jitbackendonly-own-linux-armel" JITBACKENDONLYLINUXARMHF = "jitbackendonly-own-linux-armhf" @@ -140,6 +151,10 @@ JITLINUXARMHF_RASPBIAN, # triggered by BUILDJITLINUXARMHF_RASPBIAN JITLINUXARMHF_v7, # triggered by BUILDJITLINUXARMHF_RASPBIAN, on cubieboard-bob ]), + + Triggerable("JITLINUXARMHF_RARING_scheduler", [ + JITLINUXARMHF_RARING, # triggered by BUILDJITLINUXARMHF_RARING + ]) ] builders = [ @@ -216,6 +231,12 @@ 'category': 'linux-armhf', "locks": [ARMBoardLock.access('counting')], }, + {"name": JITLINUXARMHF_RARING, + "slavenames": ["greenbox3-node0"], + 'builddir': JITLINUXARMHF_RARING, + 'factory': pypyARMHF_RARING_JITTranslatedTestFactory, + 'category': 'linux-armhf', + }, # Translation Builders for ARM {"name": BUILDLINUXARM, "slavenames": ['hhu-cross-armel'], From noreply at buildbot.pypy.org Sun Jul 28 14:07:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 14:07:55 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Ah ha, found out that there was an assert preventing accesses below the Message-ID: <20130728120755.C7CCE1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65737:332b124416bb Date: 2013-07-28 13:37 +0200 http://bitbucket.org/pypy/pypy/changeset/332b124416bb/ Log: Ah ha, found out that there was an assert preventing accesses below the value of esp, which was just silently killed. Reintroduce the assert and write comments. diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -89,7 +89,8 @@ _location_code = 's' def __init__(self, value, type): - self.value = value + assert value >= 0 # accessing values < 0 is forbidden on x86-32. + self.value = value # (on x86-64 we could allow values down to -128) self.type = type def _getregkey(self): From noreply at buildbot.pypy.org Sun Jul 28 14:07:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 14:07:57 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Cleanups; avoid changes from "default" that are now purely gratuitous. Message-ID: <20130728120757.488B01C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65738:d1054bc1d03f Date: 2013-07-28 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d1054bc1d03f/ Log: Cleanups; avoid changes from "default" that are now purely gratuitous. 
diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,10 +1,9 @@ from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import llmemory from rpython.jit.metainterp import history -from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr, BoxInt +from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.codewriter import heaptracker -from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.backend.llsupport.descr import SizeDescr, ArrayDescr from rpython.jit.metainterp.history import JitCellToken diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1,15 +1,14 @@ from rpython.jit.backend.llsupport.descr import get_size_descr,\ get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ - SizeDescrWithVTable, get_interiorfield_descr, get_call_descr + SizeDescrWithVTable, get_interiorfield_descr from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ GcLLDescr_framework from rpython.jit.backend.llsupport import jitframe -from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.gc import get_description from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.codewriter.heaptracker import register_known_gctype -from rpython.jit.metainterp.history import JitCellToken, FLOAT, ConstInt +from rpython.jit.metainterp.history import JitCellToken, FLOAT from rpython.rtyper.lltypesystem import lltype, rclass, rffi from rpython.jit.backend.x86.arch import WORD @@ -88,7 +87,6 @@ casmdescr.compiled_loop_token = clt tzdescr = None # noone cares # - namespace.update(locals()) # for funcname in self.gc_ll_descr._generated_functions: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -141,7 +141,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._load_shadowstack_top_in_reg(mc, gcrootmap) + self._load_shadowstack_top_in_ebx(mc, gcrootmap) mc.MOV_mr((ebx.value, -WORD), eax.value) mc.MOV_bi(gcmap_ofs, 0) @@ -729,7 +729,7 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_header_shadowstack(self.mc, gcrootmap) + self._call_header_shadowstack(gcrootmap) def _call_header_with_stack_check(self): self._call_header() @@ -752,7 +752,7 @@ def _call_footer(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._call_footer_shadowstack(self.mc, gcrootmap) + self._call_footer_shadowstack(gcrootmap) for i in range(len(self.cpu.CALLEE_SAVE_REGISTERS)-1, -1, -1): self.mc.MOV_rs(self.cpu.CALLEE_SAVE_REGISTERS[i].value, @@ -762,41 +762,41 @@ self.mc.ADD_ri(esp.value, FRAME_FIXED_SIZE * WORD) self.mc.RET() - def _load_shadowstack_top_in_reg(self, mc, gcrootmap, selected_reg=ebx): - """Loads the shadowstack top in selected reg, and returns an integer + def _load_shadowstack_top_in_ebx(self, mc, gcrootmap): + """Loads the shadowstack top in 
ebx, and returns an integer that gives the address of the stack top. If this integer doesn't fit in 32 bits, it will be loaded in r11. """ rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - mc.MOV_rj(selected_reg.value, rst) # MOV ebx, [rootstacktop] + mc.MOV_rj(ebx.value, rst) # MOV ebx, [rootstacktop] else: mc.MOV_ri(X86_64_SCRATCH_REG.value, rst) # MOV r11, rootstacktop - mc.MOV_rm(selected_reg.value, (X86_64_SCRATCH_REG.value, 0)) + mc.MOV_rm(ebx.value, (X86_64_SCRATCH_REG.value, 0)) # MOV ebx, [r11] # return rst - def _call_header_shadowstack(self, mc, gcrootmap, selected_reg=ebx): - rst = self._load_shadowstack_top_in_reg(mc, gcrootmap, selected_reg) - mc.MOV_mr((selected_reg.value, 0), ebp.value) # MOV [ebx], ebp - mc.ADD_ri(selected_reg.value, WORD) + def _call_header_shadowstack(self, gcrootmap): + rst = self._load_shadowstack_top_in_ebx(self.mc, gcrootmap) + self.mc.MOV_mr((ebx.value, 0), ebp.value) # MOV [ebx], ebp + self.mc.ADD_ri(ebx.value, WORD) if rx86.fits_in_32bits(rst): - mc.MOV_jr(rst, selected_reg.value) # MOV [rootstacktop], ebx + self.mc.MOV_jr(rst, ebx.value) # MOV [rootstacktop], ebx else: # The integer 'rst' doesn't fit in 32 bits, so we know that # _load_shadowstack_top_in_ebx() above loaded it in r11. # Reuse it. Be careful not to overwrite r11 in the middle! - mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), - selected_reg.value) # MOV [r11], ebx + self.mc.MOV_mr((X86_64_SCRATCH_REG.value, 0), + ebx.value) # MOV [r11], ebx - def _call_footer_shadowstack(self, mc, gcrootmap, selected_reg=ebx): + def _call_footer_shadowstack(self, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): - mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD + self.mc.SUB_ji8(rst, WORD) # SUB [rootstacktop], WORD else: - mc.MOV_ri(selected_reg.value, rst) # MOV ebx, rootstacktop - mc.SUB_mi8((selected_reg.value, 0), WORD) # SUB [ebx], WORD + self.mc.MOV_ri(ebx.value, rst) # MOV ebx, rootstacktop + self.mc.SUB_mi8((ebx.value, 0), WORD) # SUB [ebx], WORD def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking From noreply at buildbot.pypy.org Sun Jul 28 14:07:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 14:07:58 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Backing out an undocumented unsafe change to this file sneaked with the Message-ID: <20130728120758.84F3A1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65739:00021276e9c8 Date: 2013-07-28 13:42 +0200 http://bitbucket.org/pypy/pypy/changeset/00021276e9c8/ Log: Backing out an undocumented unsafe change to this file sneaked with the apparently unrelated 8d11a494f1a9 diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -263,10 +263,8 @@ funcobj = op.args[0].value._obj if funcobj.random_effects_on_gcobjs: return True - except lltype.DelayedPointer: + except (AttributeError, lltype.DelayedPointer): return True # better safe than sorry - except AttributeError: - return False return super(RandomEffectsAnalyzer, self).analyze_external_call( op, seen) From noreply at buildbot.pypy.org Sun Jul 28 14:07:59 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 14:07:59 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Remove again this unused class Message-ID: <20130728120759.EDD891C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
fast-slowpath Changeset: r65740:9de7dcdcdded Date: 2013-07-28 13:45 +0200 http://bitbucket.org/pypy/pypy/changeset/9de7dcdcdded/ Log: Remove again this unused class diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -5,7 +5,7 @@ from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets from rpython.jit.codewriter.policy import log from rpython.jit.metainterp import quasiimmut -from rpython.jit.metainterp.history import getkind, AbstractDescr +from rpython.jit.metainterp.history import getkind from rpython.jit.metainterp.typesystem import deref, arrayItem from rpython.jit.metainterp.blackhole import BlackholeInterpreter from rpython.flowspace.model import SpaceOperation, Variable, Constant, c_last_exception @@ -16,15 +16,6 @@ from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype -class IntDescr(AbstractDescr): - """ Disguise int as a descr - """ - def __init__(self, v): - self.v = v - - def getint(self): - return self.v - class UnsupportedMallocFlags(Exception): pass From noreply at buildbot.pypy.org Sun Jul 28 14:08:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 14:08:01 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: 'jit_conditional_call' in the RPython program should never be executed. Message-ID: <20130728120801.2EC361C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65741:30f47cedbeab Date: 2013-07-28 13:59 +0200 http://bitbucket.org/pypy/pypy/changeset/30f47cedbeab/ Log: 'jit_conditional_call' in the RPython program should never be executed. Complain more loudly if it is. diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -455,7 +455,7 @@ return self.generic_call(FUNC, fnexpr, op.args[1:], op.result) def OP_JIT_CONDITIONAL_CALL(self, op): - return '' + return 'abort(); /* jit_conditional_call */' # low-level operations def generic_get(self, op, sourceexpr): From noreply at buildbot.pypy.org Sun Jul 28 15:03:50 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 28 Jul 2013 15:03:50 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: Simplify makerepr and makekey Message-ID: <20130728130350.EB5A81C3227@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65742:52c5500cb939 Date: 2013-07-28 02:31 +0100 http://bitbucket.org/pypy/pypy/changeset/52c5500cb939/ Log: Simplify makerepr and makekey diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -157,10 +157,9 @@ raise KeyError(search) def makekey(self, s_obj): - return pair(self.type_system, s_obj).rtyper_makekey(self) - - def _makerepr(self, s_obj): - return pair(self.type_system, s_obj).rtyper_makerepr(self) + if hasattr(s_obj, "rtyper_makekey_ex"): + return s_obj.rtyper_makekey_ex(self) + return s_obj.rtyper_makekey() def getrepr(self, s_obj): # s_objs are not hashable... 
try hard to find a unique key anyway @@ -170,7 +169,7 @@ result = self.reprs[key] except KeyError: self.reprs[key] = None - result = self._makerepr(s_obj) + result = s_obj.rtyper_makerepr(self) assert not isinstance(result.lowleveltype, ContainerType), ( "missing a Ptr in the type specification " "of %s:\n%r" % (s_obj, result.lowleveltype)) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -145,17 +145,3 @@ LowLevelTypeSystem.instance = LowLevelTypeSystem() getfunctionptr = LowLevelTypeSystem.instance.getcallable - -# Multiple dispatch on type system and high-level annotation - -from rpython.tool.pairtype import pairtype -from rpython.annotator.model import SomeObject - -class __extend__(pairtype(TypeSystem, SomeObject)): - def rtyper_makerepr((ts, s_obj), rtyper): - return s_obj.rtyper_makerepr(rtyper) - - def rtyper_makekey((ts, s_obj), rtyper): - if hasattr(s_obj, "rtyper_makekey_ex"): - return s_obj.rtyper_makekey_ex(rtyper) - return s_obj.rtyper_makekey() From noreply at buildbot.pypy.org Sun Jul 28 15:03:52 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 28 Jul 2013 15:03:52 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: Remove type_system argument from RPythonTyper.__init__ Message-ID: <20130728130352.610C01C3227@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65743:bf9c9135b763 Date: 2013-07-28 13:04 +0100 http://bitbucket.org/pypy/pypy/changeset/bf9c9135b763/ Log: Remove type_system argument from RPythonTyper.__init__ diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -59,7 +59,7 @@ t.buildannotator().build_types(function, [int] * len(args), main_entry_point=True) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() warmrunnerdesc = WarmRunnerDesc(t, translate_support_code=True, CPUClass=self.CPUClass, **kwds) diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -21,9 +21,9 @@ self.callcontrol = CallControl(cpu, jitdrivers_sd) self._seen_files = set() - def transform_func_to_jitcode(self, func, values, type_system='lltype'): + def transform_func_to_jitcode(self, func, values): """For testing.""" - rtyper = support.annotate(func, values, type_system=type_system) + rtyper = support.annotate(func, values) graph = rtyper.annotator.translator.graphs[0] jitcode = JitCode("test") self.transform_graph_to_jitcode(graph, jitcode, True) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -34,7 +34,7 @@ return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, - type_system="lltype", translationoptions={}): + translationoptions={}): # build the normal ll graphs for ll_function t = TranslationContext() for key, value in translationoptions.items(): @@ -43,7 +43,7 @@ a = t.buildannotator(policy=annpolicy) argtypes = getargtypes(a, values) a.build_types(func, argtypes, main_entry_point=True) - rtyper = t.buildrtyper(type_system = type_system) + rtyper = t.buildrtyper() rtyper.specialize() #if inline: # auto_inlining(t, threshold=inline) diff --git a/rpython/jit/codewriter/test/test_flatten.py 
b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -49,7 +49,7 @@ class FakeCPU: class tracker: pass - + def __init__(self, rtyper): rtyper._builtin_func_for_spec_cache = FakeDict() self.rtyper = rtyper @@ -123,8 +123,8 @@ class TestFlatten: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def encoding_test(self, func, args, expected, diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -13,8 +13,8 @@ class TestRegAlloc: - def make_graphs(self, func, values, type_system='lltype'): - self.rtyper = support.annotate(func, values, type_system=type_system) + def make_graphs(self, func, values): + self.rtyper = support.annotate(func, values) return self.rtyper.annotator.translator.graphs def check_assembler(self, graph, expected, transform=False, diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -50,7 +50,7 @@ FakeWarmRunnerState.enable_opts = {} func._jit_unroll_safe_ = True - rtyper = support.annotate(func, values, type_system=type_system, + rtyper = support.annotate(func, values, translationoptions=translationoptions) graphs = rtyper.annotator.translator.graphs testself.all_graphs = graphs @@ -210,7 +210,7 @@ def interp_operations(self, f, args, **kwds): # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, self.type_system, **kwds) + _get_jitcodes(self, self.CPUClass, f, args, **kwds) # try to run it with blackhole.py result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -27,7 +27,7 @@ x1 = vref() # jit_force_virtual virtual_ref_finish(vref, x) # - _get_jitcodes(self, self.CPUClass, fn, [], self.type_system) + _get_jitcodes(self, self.CPUClass, fn, []) graph = self.all_graphs[0] assert graph.name == 'fn' self.vrefinfo.replace_force_virtual_with_call([graph]) diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -36,8 +36,7 @@ return res def gengraph(func, argtypes=[], viewbefore='auto', policy=None, - type_system="lltype", backendopt=False, config=None, - **extraconfigopts): + backendopt=False, config=None, **extraconfigopts): t = TranslationContext(config=config) t.config.set(**extraconfigopts) a = t.buildannotator(policy=policy) @@ -48,7 +47,7 @@ a.simplify() t.view() global typer # we need it for find_exception - typer = t.buildrtyper(type_system=type_system) + typer = t.buildrtyper() timelog("rtyper-specializing", typer.specialize) #t.view() timelog("checking graphs", t.checkgraphs) @@ -88,9 +87,8 @@ policy = AnnotatorPolicy() t, typer, graph = gengraph(func, [annotation(x) for x in values], - viewbefore, policy, type_system=type_system, - backendopt=backendopt, config=config, - **extraconfigopts) + 
viewbefore, policy, backendopt=backendopt, + config=config, **extraconfigopts) interp = LLInterpreter(typer) _tcache[key] = (t, interp, graph) # keep the cache small diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1439,7 +1439,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_A_list = s.items[0] @@ -1467,7 +1467,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_A_list = s.items[0] diff --git a/rpython/rtyper/test/test_rtuple.py b/rpython/rtyper/test/test_rtuple.py --- a/rpython/rtyper/test/test_rtuple.py +++ b/rpython/rtyper/test/test_rtuple.py @@ -159,7 +159,7 @@ t = TranslationContext() s = t.buildannotator().build_types(f, []) - rtyper = t.buildrtyper(type_system=self.type_system) + rtyper = t.buildrtyper() rtyper.specialize() s_AB_tup = s.items[0] diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -8,7 +8,7 @@ def gengraph(self, func, argtypes=[], viewbefore='auto', policy=None, backendopt=False, config=None): - return gengraph(func, argtypes, viewbefore, policy, type_system=self.type_system, + return gengraph(func, argtypes, viewbefore, policy, backendopt=backendopt, config=config) def interpret(self, fn, args, **kwds): diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py --- a/rpython/translator/backendopt/test/test_all.py +++ b/rpython/translator/backendopt/test/test_all.py @@ -48,7 +48,7 @@ def translateopt(self, func, sig, **optflags): t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() if option.view: t.view() backend_optimizations(t, **optflags) @@ -265,7 +265,7 @@ t = TranslationContext() t.buildannotator().build_types(main, [int]) - t.buildrtyper(type_system='lltype').specialize() + t.buildrtyper().specialize() exctransformer = t.getexceptiontransformer() exctransformer.create_exception_handling(graphof(t, common)) from rpython.annotator import model as annmodel diff --git a/rpython/translator/backendopt/test/test_canraise.py b/rpython/translator/backendopt/test/test_canraise.py --- a/rpython/translator/backendopt/test/test_canraise.py +++ b/rpython/translator/backendopt/test/test_canraise.py @@ -7,7 +7,7 @@ def translate(self, func, sig): t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system='lltype').specialize() + t.buildrtyper().specialize() if option.view: t.view() return t, RaiseAnalyzer(t) diff --git a/rpython/translator/backendopt/test/test_finalizer.py b/rpython/translator/backendopt/test/test_finalizer.py --- a/rpython/translator/backendopt/test/test_finalizer.py +++ b/rpython/translator/backendopt/test/test_finalizer.py @@ -19,7 +19,7 @@ func_to_analyze = func t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system='lltype').specialize() + t.buildrtyper().specialize() if backendopt: backend_optimizations(t) if option.view: diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py --- 
a/rpython/translator/backendopt/test/test_inline.py +++ b/rpython/translator/backendopt/test/test_inline.py @@ -52,7 +52,7 @@ def translate(self, func, argtypes): t = TranslationContext() t.buildannotator().build_types(func, argtypes) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() return t def check_inline(self, func, in_func, sig, entry=None, diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py --- a/rpython/translator/backendopt/test/test_malloc.py +++ b/rpython/translator/backendopt/test/test_malloc.py @@ -34,7 +34,7 @@ remover = self.MallocRemover() t = TranslationContext() t.buildannotator().build_types(fn, signature) - t.buildrtyper(type_system='lltype').specialize() + t.buildrtyper().specialize() graph = graphof(t, fn) if inline is not None: from rpython.translator.backendopt.inline import auto_inline_graphs diff --git a/rpython/translator/backendopt/test/test_mallocv.py b/rpython/translator/backendopt/test/test_mallocv.py --- a/rpython/translator/backendopt/test/test_mallocv.py +++ b/rpython/translator/backendopt/test/test_mallocv.py @@ -37,7 +37,7 @@ t = TranslationContext() self.translator = t t.buildannotator().build_types(fn, signature) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() graph = graphof(t, fn) if option.view: t.view() diff --git a/rpython/translator/backendopt/test/test_storesink.py b/rpython/translator/backendopt/test/test_storesink.py --- a/rpython/translator/backendopt/test/test_storesink.py +++ b/rpython/translator/backendopt/test/test_storesink.py @@ -12,7 +12,7 @@ def translate(self, func, argtypes): t = TranslationContext() t.buildannotator().build_types(func, argtypes) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() return t def check(self, f, argtypes, no_getfields=0): diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py --- a/rpython/translator/backendopt/test/test_writeanalyze.py +++ b/rpython/translator/backendopt/test/test_writeanalyze.py @@ -14,7 +14,7 @@ def translate(self, func, sig): t = TranslationContext() t.buildannotator().build_types(func, sig) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() if option.view: t.view() return t, self.Analyzer(t) diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -346,7 +346,7 @@ def task_rtype_lltype(self): """ RTyping - lltype version """ - rtyper = self.translator.buildrtyper(type_system='lltype') + rtyper = self.translator.buildrtyper() rtyper.specialize(dont_simplify_again=True) @taskdef([RTYPE], "JIT compiler generation") diff --git a/rpython/translator/test/test_exceptiontransform.py b/rpython/translator/test/test_exceptiontransform.py --- a/rpython/translator/test/test_exceptiontransform.py +++ b/rpython/translator/test/test_exceptiontransform.py @@ -36,7 +36,7 @@ def transform_func(self, fn, inputtypes, backendopt=False): t = TranslationContext() t.buildannotator().build_types(fn, inputtypes) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() if option.view: t.view() if backendopt: @@ -158,7 +158,7 @@ return x + 1 t = TranslationContext() t.buildannotator().build_types(f, [int]) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() g = 
graphof(t, f) etrafo = exceptiontransform.ExceptionTransformer(t) etrafo.create_exception_handling(g) @@ -170,7 +170,7 @@ raise ValueError t = TranslationContext() t.buildannotator().build_types(f, [int]) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() g = graphof(t, f) etrafo = exceptiontransform.ExceptionTransformer(t) etrafo.create_exception_handling(g) @@ -233,7 +233,7 @@ return s.x t = TranslationContext() t.buildannotator().build_types(f, [int]) - t.buildrtyper(type_system=self.type_system).specialize() + t.buildrtyper().specialize() g = graphof(t, f) etrafo = exceptiontransform.ExceptionTransformer(t) etrafo.create_exception_handling(g) diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -335,7 +335,7 @@ t.buildannotator().build_types(func, argtypes) if option.view: t.view() - t.buildrtyper(self.typesystem).specialize() + t.buildrtyper().specialize() backend_optimizations(t) if option.view: t.view() diff --git a/rpython/translator/test/test_unsimplify.py b/rpython/translator/test/test_unsimplify.py --- a/rpython/translator/test/test_unsimplify.py +++ b/rpython/translator/test/test_unsimplify.py @@ -11,7 +11,7 @@ t = TranslationContext() t.buildannotator().build_types(func, argtypes) t.entry_point_graph = graphof(t, func) - t.buildrtyper(type_system=type_system).specialize() + t.buildrtyper().specialize() return graphof(t, func), t def test_split_blocks_simple(): diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -74,14 +74,13 @@ self.annotator = RPythonAnnotator(self, policy=policy) return self.annotator - def buildrtyper(self, type_system="lltype"): + def buildrtyper(self): if self.annotator is None: raise ValueError("no annotator") if self.rtyper is not None: raise ValueError("we already have an rtyper") from rpython.rtyper.rtyper import RPythonTyper - self.rtyper = RPythonTyper(self.annotator, - type_system=type_system) + self.rtyper = RPythonTyper(self.annotator) return self.rtyper def getexceptiontransformer(self): From noreply at buildbot.pypy.org Sun Jul 28 15:29:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 15:29:27 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Fix on x86-32 Message-ID: <20130728132927.249C81C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65744:5ff6858cd72f Date: 2013-07-28 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/5ff6858cd72f/ Log: Fix on x86-32 diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -86,7 +86,7 @@ if self.cpu.IS_64_BIT: assert nos == [0, 1, 31] else: - assert nos == [4, 5, 25] + assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): assert nos == [9, 10, 47] else: From noreply at buildbot.pypy.org Sun Jul 28 15:43:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 15:43:47 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: Fix this test for cond_call Message-ID: <20130728134347.3C5C21C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: fast-slowpath Changeset: r65745:e8646d8bfe61 Date: 2013-07-28 15:49 +0200 
http://bitbucket.org/pypy/pypy/changeset/e8646d8bfe61/ Log: Fix this test for cond_call diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -347,10 +347,11 @@ guard_not_invalidated? i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) - # Will be killed by the backend p15 = getfield_gc(p8, descr=) i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + i18 = int_lt(i17, i15) + # a cond call to _ll_list_resize_hint_really_look_inside_iff + cond_call(i18, _, p8, i15, 1, descr=) guard_no_exception(descr=...) p17 = getfield_gc(p8, descr=) setarrayitem_gc(p17, i13, i12, descr=) From noreply at buildbot.pypy.org Sun Jul 28 15:56:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 15:56:51 +0200 (CEST) Subject: [pypy-commit] stmgc default: "static inline" functions are not quite as good as macros. gcc seems to Message-ID: <20130728135651.4DBCB1C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r468:454ae0c1eb07 Date: 2013-07-28 15:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/454ae0c1eb07/ Log: "static inline" functions are not quite as good as macros. gcc seems to inline the functions partially only. diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -69,8 +69,10 @@ do stm_write_barrier() again if we ended the transaction, or if we did a potential collection (e.g. stm_allocate()). */ -static inline gcptr stm_read_barrier(gcptr); -static inline gcptr stm_write_barrier(gcptr); +#if 0 // (optimized version below) +gcptr stm_read_barrier(gcptr); +gcptr stm_write_barrier(gcptr); +#endif /* start a new transaction, calls callback(), and when it returns finish that transaction. callback() is called with the 'arg' @@ -139,16 +141,12 @@ /************************************************************/ -/* macro-like functionality */ +/* macro functionality */ extern __thread gcptr *stm_shadowstack; -static inline void stm_push_root(gcptr obj) { - *stm_shadowstack++ = obj; -} -static inline gcptr stm_pop_root(void) { - return *--stm_shadowstack; -} +#define stm_push_root(obj) (*stm_shadowstack++ = (obj)) +#define stm_pop_root() (*--stm_shadowstack) extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); @@ -160,21 +158,18 @@ (*(gcptr *)(stm_read_barrier_cache + ((revision_t)(obj) & FX_MASK))) #define UNLIKELY(test) __builtin_expect(test, 0) -static inline gcptr stm_read_barrier(gcptr obj) { - /* XXX optimize to get the smallest code */ - if (UNLIKELY((obj->h_revision != stm_private_rev_num) && - (FXCACHE_AT(obj) != obj))) - obj = stm_DirectReadBarrier(obj); - return obj; -} -static inline gcptr stm_write_barrier(gcptr obj) { - if (UNLIKELY((obj->h_revision != stm_private_rev_num) | - ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) - obj = stm_WriteBarrier(obj); - return obj; -} -#undef UNLIKELY +#define stm_read_barrier(obj) \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) & \ + (FXCACHE_AT(obj) != (obj))) ? \ + stm_DirectReadBarrier(obj) \ + : (obj)) + +#define stm_write_barrier(obj) \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) | \ + (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? 
\ + stm_WriteBarrier(obj) \ + : (obj)) #endif From noreply at buildbot.pypy.org Sun Jul 28 16:05:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 16:05:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: Better code this way Message-ID: <20130728140514.83DE11C030B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r469:0ebfd6dd4f46 Date: 2013-07-28 16:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/0ebfd6dd4f46/ Log: Better code this way diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -160,13 +160,13 @@ #define UNLIKELY(test) __builtin_expect(test, 0) #define stm_read_barrier(obj) \ - (UNLIKELY(((obj)->h_revision != stm_private_rev_num) & \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ (FXCACHE_AT(obj) != (obj))) ? \ stm_DirectReadBarrier(obj) \ : (obj)) #define stm_write_barrier(obj) \ - (UNLIKELY(((obj)->h_revision != stm_private_rev_num) | \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) || \ (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ stm_WriteBarrier(obj) \ : (obj)) From noreply at buildbot.pypy.org Sun Jul 28 19:43:47 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 19:43:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed some more ootyep specific things. Message-ID: <20130728174347.79CD91C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65746:b4d07b830269 Date: 2013-07-28 10:43 -0700 http://bitbucket.org/pypy/pypy/changeset/b4d07b830269/ Log: Removed some more ootyep specific things. diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -317,7 +317,6 @@ s_iterable = args_s[0] if isinstance(s_iterable, (SomeList, SomeDict)): lst = SomeList(lst.listdef) # create a fresh copy - lst.known_maxlength = True lst.listdef.resize() lst.listdef.listitem.hint_maxlength = True elif 'fence' in hints: diff --git a/rpython/jit/tl/conftest.py b/rpython/jit/tl/conftest.py deleted file mode 100644 --- a/rpython/jit/tl/conftest.py +++ /dev/null @@ -1,5 +0,0 @@ -def pytest_addoption(parser): - group = parser.getgroup("pypyjit.py options") - group.addoption('--ootype', action="store_true", dest="ootype", - default=False, - help="use ootype") diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -160,11 +160,9 @@ return self.model.ConstFloat(self.model.convert_to_floatstorage(arg)) if (arg.startswith('"') or arg.startswith("'") or arg.startswith('s"')): - # XXX ootype info = arg[1:].strip("'\"") return self.model.get_const_ptr_for_string(info) if arg.startswith('u"'): - # XXX ootype info = arg[1:].strip("'\"") return self.model.get_const_ptr_for_unicode(info) if arg.startswith('ConstClass('): diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -553,8 +553,7 @@ def _normalize(x): if not isinstance(x, str): TYPE = lltype.typeOf(x) - if (isinstance(TYPE, lltype.Ptr) and TYPE.TO._name == 'rpy_string' - or getattr(TYPE, '_name', '') == 'String'): # ootype + if isinstance(TYPE, lltype.Ptr) and TYPE.TO._name == 'rpy_string': from rpython.rtyper.annlowlevel import hlstr return hlstr(x) return x diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ 
b/rpython/rtyper/lltypesystem/rlist.py @@ -32,8 +32,7 @@ class BaseListRepr(AbstractBaseListRepr): rstr_ll = rstr.LLHelpers - # known_maxlength is ignored by lltype but used by ootype - def __init__(self, rtyper, item_repr, listitem=None, known_maxlength=False): + def __init__(self, rtyper, item_repr, listitem=None): self.rtyper = rtyper self.LIST = GcForwardReference() self.lowleveltype = Ptr(self.LIST) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -48,16 +48,14 @@ # of recursive structures -- i.e. if the listdef contains itself rlist = rtyper.type_system.rlist item_repr = lambda: rtyper.getrepr(listitem.s_value) - known_maxlength = getattr(self, 'known_maxlength', False) if self.listdef.listitem.resized: - return rlist.ListRepr(rtyper, item_repr, listitem, known_maxlength) + return rlist.ListRepr(rtyper, item_repr, listitem) else: return rlist.FixedSizeListRepr(rtyper, item_repr, listitem) def rtyper_makekey(self): self.listdef.listitem.dont_change_any_more = True - known_maxlength = getattr(self, 'known_maxlength', False) - return self.__class__, self.listdef.listitem, known_maxlength + return self.__class__, self.listdef.listitem class AbstractBaseListRepr(Repr): diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -38,7 +38,7 @@ } def backend_to_typesystem(backend): - return _BACKEND_TO_TYPESYSTEM.get(backend, 'ootype') + return _BACKEND_TO_TYPESYSTEM[backend] # set of translation steps to profile PROFILE = set([]) @@ -558,7 +558,6 @@ assert 'rpython.rtyper.rmodel' not in sys.modules, ( "cannot fork because the rtyper has already been imported") prereq_checkpt_rtype_lltype = prereq_checkpt_rtype - prereq_checkpt_rtype_ootype = prereq_checkpt_rtype # checkpointing support def _event(self, kind, goal, func): From noreply at buildbot.pypy.org Sun Jul 28 19:47:42 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 28 Jul 2013 19:47:42 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: Actually remove type_system argument from RPythonTyper.__init__() Message-ID: <20130728174742.204621C2FC5@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65747:d4e96e6dcae7 Date: 2013-07-28 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/d4e96e6dcae7/ Log: Actually remove type_system argument from RPythonTyper.__init__() diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1069,7 +1069,7 @@ if getattr(graph, 'func', None) is f] init_graph = t._graphof(Frame.__init__.im_func) - deref = t.rtyper.type_system_deref + deref = t.rtyper.type_system.deref def direct_calls(graph): return [deref(op.args[0].value)._callable.func_name diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -32,19 +32,10 @@ class RPythonTyper(object): from rpython.rtyper.rmodel import log - def __init__(self, annotator, type_system="lltype"): + def __init__(self, annotator): self.annotator = annotator - self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - - if isinstance(type_system, str): - if type_system == "lltype": - self.type_system = LowLevelTypeSystem.instance - else: - raise TyperError("Unknown type system %r!" 
% type_system) - else: - self.type_system = type_system - self.type_system_deref = self.type_system.deref + self.type_system = LowLevelTypeSystem.instance self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} @@ -940,7 +931,7 @@ # build the 'direct_call' operation f = self.rtyper.getcallable(graph) c = inputconst(typeOf(f), f) - fobj = self.rtyper.type_system_deref(f) + fobj = self.rtyper.type_system.deref(f) return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) diff --git a/rpython/rtyper/test/test_rvirtualizable2.py b/rpython/rtyper/test/test_rvirtualizable2.py --- a/rpython/rtyper/test/test_rvirtualizable2.py +++ b/rpython/rtyper/test/test_rvirtualizable2.py @@ -339,7 +339,7 @@ g(a) t, typer, graph = self.gengraph(f, []) - deref = typer.type_system_deref + deref = typer.type_system.deref desc = typer.annotator.bookkeeper.getdesc(g) g_graphs = desc._cache.items() From noreply at buildbot.pypy.org Sun Jul 28 19:55:32 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 19:55:32 +0200 (CEST) Subject: [pypy-commit] pypy default: removed more references to the ootyep backends, particularly in the docs Message-ID: <20130728175532.502111C2FC5@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65748:ea18f777ec85 Date: 2013-07-28 10:54 -0700 http://bitbucket.org/pypy/pypy/changeset/ea18f777ec85/ Log: removed more references to the ootyep backends, particularly in the docs diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. - A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). 
You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. _`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. 
_`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. - Installation ++++++++++++ diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -2,7 +2,7 @@ import os DIRS_SPLIT = [ - 'translator/c', 'translator/jvm', 'rlib', + 'translator/c', 'rlib', 'rpython/memory', 'jit/metainterp', 'rpython/test', 'jit/backend/arm', 'jit/backend/x86', ] diff --git a/tddium.yml b/tddium.yml --- a/tddium.yml +++ b/tddium.yml @@ -10,7 +10,6 @@ - pypy/**/test_*.py - rpython/**/test_*.py - exclude: pypy/module/test_lib_pypy/ctypes_tests/** # don't run in CPython - - exclude: rpython/jit/backend/cli/** # bitrotted AFAICT - exclude: rpython/jit/backend/llvm/** # bitrotted AFAICT # and things requiring a fix in Tddium, omitted to avoid confusion: - exclude: rpython/rlib/unicodedata/test/test_ucd.py # need wide build From noreply at buildbot.pypy.org Sun Jul 28 20:32:14 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 20:32:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed a ton of documenation about ootypesystem. Message-ID: <20130728183214.0AAD01C36E5@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65749:66ce9bd318a9 Date: 2013-07-28 11:31 -0700 http://bitbucket.org/pypy/pypy/changeset/66ce9bd318a9/ Log: Removed a ton of documenation about ootypesystem. 
diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. 
- -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. 
Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. 
Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. 
- -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. 
- -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. - -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. 
The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -85,14 +85,9 @@ from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ @@ -123,7 +118,6 @@ .. _`testing methods`: coding-guide.html#testing-in-pypy .. _`translation`: translation.html .. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. _JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. 
What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). - -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. 
result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. 
- - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... 
} - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. - -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. 
toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. 
This executable can be moved around or copied on other machines; see @@ -194,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. **This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -236,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -84,12 +84,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. 
- - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. - -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. - -As usual, it is possible to convert an object from type to type only -under certain conditions; there is a number of predefined conversions -between primitive types such as from ``Bool`` to ``Signed`` or from -``Signed`` to ``Float``. For each one of these conversions there is a -corresponding low level operation, such as ``cast_bool_to_int`` and -``cast_int_to_float``. - -Moreover it is possible to cast instances of a class up and down the -inheritance hierarchy with the ``ooupcast`` and ``oodowncast`` low -level operations. Implicit upcasting is not allowed, so you really -need to do a ``ooupcast`` for converting from a subclass to a -superclass. - -With this design statically typed backends can trivially insert -appropriate casts when needed, while dynamically typed backends can -simply ignore some of the operation such as ``ooupcast`` and -``oodowncast``. Backends that supports implicit upcasting, such as CLI -and Java, can simply ignore only ``ooupcast``. - -Object model -++++++++++++ - -The object model implemented by ootype is quite Java-like. 
The -following is a list of key features of the ootype object model which -have a direct correspondence in the Java or .NET object model: - - - classes have a static set of strongly typed methods and - attributes; - - - methods can be overriden in subclasses; every method is "virtual" - (i.e., can be overridden); methods can be "abstract" (i.e., need - to be overridden in subclasses); - - - classes support single inheritance; all classes inherit directly - or indirectly from the ROOT class; - - - there is some support for method overloading. This feature is not - used by the RTyper itself because RPython doesn't support method - overloading, but it is used by the GenCLI backend for offering - access to the native .NET libraries (see XXX); - - - all classes, attributes and methods are public: ootype is only - used internally by the translator, so there is no need to enforce - accessibility rules; - - - classes and functions are first-class order objects: this feature - can be easily simulated by backends for platforms on which it is not - a native feature; - - - there is a set of `built-in types`_ offering standard features. - -Exception handling -++++++++++++++++++ - -Since flow graphs are meant to be used also for very low level -backends such as C, they are quite unstructured: this means that the -target platform doesn't need to have a native exception handling -mechanism, since at the very least the backend can handle exceptions -just like ``genc`` does. - -By contrast we know that most of high level platforms natively support -exception handling, so *ootypesystem* is designed to let them to use -it. In particular the exception instances are typed with the -``Instance`` type, so the usual inheritance exception hierarchy is -preserved and the native way to catch exception should just work. - -.. `built-in types`_ - -Built-in types -++++++++++++++ - -It seems reasonable to assume high level platforms to provide built-in -facilities for common types such as *lists* or *hashtables*. - -RPython standard types such as ``List`` and ``Dict`` are implemented -on top of these common types; at the moment of writing there are six -built-in types: - - - **String**: self-descriptive - - - **StringBuilder**: used for dynamic building of string - - - **List**: a variable-sized, homogeneous list of object - - - **Dict**: a hashtable of homogeneous keys and values - - - **CustomDict**: same as dict, but with custom equal and hash - functions - - - **DictItemsIterator**: a helper class for iterating over the - elements of a ``Dict`` - - -Each of these types is a subtype of ``BuiltinADTType`` and has set of -ADT (Abstract Data Type) methods (hence the name of the base class) -for being manipulated. Examples of ADT methods are ``ll_length`` for -``List`` and ``ll_get`` for ``Dict``. - -From the backend point of view an instance of a built-in types is -treated exactly as a plain ``Instance``, so usually no special-casing -is needed. The backend is supposed to provide a bunch of classes -wrapping the native ones in order to provide the right signature and -semantic for the ADT methods. - -As an alternative, backends can special-case the ADT types to map them -directly to the native equivalent, translating the method names -on-the-fly at compile time. - -Generics -++++++++ - -Some target platforms offer native support for **generics**, i.e. -classes that can be parametrized on types, not only values. 
For -example, if one wanted to create a list using generics, a possible -declaration would be to say ``List``, where ``T`` represented the -type. When instantiated, one could create ``List`` or -``List``. The list is then treated as a list of whichever type -is specified. - -Each subclass of ``BuiltinADTTypes`` defines a bunch of type -parameters by creating some class level placeholder in the form of -``PARAMNAME_T``; then it fills up the ``_GENERIC_METHODS`` attribute -by defining the signature of each of the ADT methods using those -placeholders in the appropriate places. As an example, here is an -extract of *ootypesystem*'s List type:: - - class List(BuiltinADTType): - # placeholders for types - SELFTYPE_T = object() - ITEMTYPE_T = object() - - ... - - def _init_methods(self): - # 'ITEMTYPE_T' is used as a placeholder for indicating - # arguments that should have ITEMTYPE type. 'SELFTYPE_T' indicates 'self' - - self._GENERIC_METHODS = frozendict({ - "ll_length": Meth([], Signed), - "ll_getitem_fast": Meth([Signed], self.ITEMTYPE_T), - "ll_setitem_fast": Meth([Signed, self.ITEMTYPE_T], Void), - "_ll_resize_ge": Meth([Signed], Void), - "_ll_resize_le": Meth([Signed], Void), - "_ll_resize": Meth([Signed], Void), - }) - - ... - -Thus backends that support generics can simply look for placeholders -for discovering where the type parameters are used. Backends that -don't support generics can simply use the ``Root`` class instead and -insert the appropriate casts where needed. Note that placeholders -might also stand for primitive types, which typically require more -involved casts: e.g. in Java, making wrapper objects around ints. - - HighLevelOp interface --------------------- diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -27,15 +27,10 @@ this task into several steps, and the purpose of this document is to introduce them. -As of the 1.2 release, RPython_ programs can be translated into the following -languages/platforms: C/POSIX, CLI/.NET -and Java/JVM. - .. _`application-level`: coding-guide.html#application-level .. _`interpreter-level`: coding-guide.html#interpreter-level -The choice of the target platform affects the process somewhat, but to -start with we describe the process of translating an RPython_ program into +To start with we describe the process of translating an RPython_ program into C (which is the default and original target). .. _`initialization time`: @@ -654,54 +649,6 @@ Use the :config:`translation.backend` option to choose which backend to use. - -The Object-Oriented Backends ----------------------------- - -The Object-Oriented backends target platforms that are less C-like and support -classes, instance etc. If such a platform is targeted, the `OO type system` is -used while rtyping. Of the OO backends, both gencli and genjava can translate -the full Python interpreter. - -.. _`oo type system`: rtyper.html#oo-type - -.. mention that pretty much all these backends are done by volunteers? - -GenCLI -++++++ - -GenCLI_ targets the `Common Language Infrastructure`_, the most famous -implementations of which are Microsoft's `.NET`_ and Mono_. - -It is the most advanced of the object oriented backends -- it can -compile the PyPy interpreter as well as our two standard benchmarks, -RPyStone (CPython's PyStone benchmark modified slightly to be RPython) -and a RPython version of the common Richards benchmark. 
- -It is almost entirely the work of Antonio Cuni, who started this -backend as part of his `Master's thesis`_, the Google's Summer of Code -2006 program and the Summer of PyPy program. - -.. _`Common Language Infrastructure`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`.NET`: http://www.microsoft.com/net/ -.. _Mono: http://www.mono-project.com/ -.. _`Master's thesis`: http://buildbot.pypy.org/misc/Implementing%20Python%20in%20.NET.pdf -.. _GenCLI: cli-backend.html - -GenJVM -++++++ - -GenJVM targets the Java Virtual Machine: it translates RPython -programs directly into Java bytecode, similarly to what GenCLI does. - -So far it is the second most mature high level backend after GenCLI: -it still can't translate the full Standard Interpreter, but after the -Leysin sprint we were able to compile and run the rpystone and -richards benchmarks. - -GenJVM is almost entirely the work of Niko Matsakis, who worked on it -also as part of the Summer of PyPy program. - .. _extfunccalls: External Function Calls From noreply at buildbot.pypy.org Sun Jul 28 21:18:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 21:18:47 +0200 (CEST) Subject: [pypy-commit] cffi default: We started to add bug fixes to libffi_msvc. Message-ID: <20130728191847.46F531C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1297:5877e7273c75 Date: 2013-07-28 21:18 +0200 http://bitbucket.org/cffi/cffi/changeset/5877e7273c75/ Log: We started to add bug fixes to libffi_msvc. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -55,10 +55,7 @@ COMPILE_LIBFFI = None if COMPILE_LIBFFI: - assert os.path.isdir(COMPILE_LIBFFI), ( - "On Windows, you need to copy the directory " - "Modules\\_ctypes\\libffi_msvc from the CPython sources (2.6 or 2.7) " - "into the top-level directory.") + assert os.path.isdir(COMPILE_LIBFFI), "directory not found!" include_dirs[:] = [COMPILE_LIBFFI] libraries[:] = [] _filenames = [filename.lower() for filename in os.listdir(COMPILE_LIBFFI)] From noreply at buildbot.pypy.org Sun Jul 28 21:28:58 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 21:28:58 +0200 (CEST) Subject: [pypy-commit] pypy fast-slowpath: close to-be-merged branch Message-ID: <20130728192858.0D3041C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: fast-slowpath Changeset: r65750:1290b475ed8b Date: 2013-07-28 21:26 +0200 http://bitbucket.org/pypy/pypy/changeset/1290b475ed8b/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Sun Jul 28 21:28:59 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 28 Jul 2013 21:28:59 +0200 (CEST) Subject: [pypy-commit] pypy default: (fijal, arigo reviewing) Merge fast-slowpath branch. Message-ID: <20130728192859.D08961C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65751:f3f13c64a057 Date: 2013-07-28 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/f3f13c64a057/ Log: (fijal, arigo reviewing) Merge fast-slowpath branch. This branch adds a powerful abstraction to do a conditional call in the JIT without branching. It's written like this: from rpython.rlib import jit jit.conditional_call(condition, function, args) Which will not execute the call in the assembler if the condition is False, but will also not generate bridges ever. It's used so far only for ll_list_resize primitives. 
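A minimal sketch of how this primitive can be used from RPython code; everything except ``jit.conditional_call`` itself (the ``Buf`` class and the ``grow`` and ``append`` helpers) is invented here purely for illustration and is not part of this changeset::

    from rpython.rlib import jit

    class Buf(object):
        def __init__(self):
            self.items = [None] * 4
            self.length = 0

    def grow(buf, newsize):
        # slow path: only executed when the condition passed to
        # conditional_call was true
        buf.items = buf.items + [None] * (newsize - len(buf.items))

    def append(buf, item):
        need_grow = buf.length + 1 > len(buf.items)
        # instead of an if/else, which would leave a guard in the trace
        # and grow a bridge whenever the condition flips, this emits a
        # single conditional call in the JIT
        jit.conditional_call(need_grow, grow, buf, buf.length + 1)
        buf.items[buf.length] = item
        buf.length += 1

When run untranslated, ``jit.conditional_call(cond, fn, *args)`` simply calls ``fn(*args)`` if ``cond`` is true; in an optimized trace it becomes one ``cond_call(...)`` operation with no bridge, as reflected in the updated trace expectations for test_call.py in the diff below.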
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -347,10 +347,11 @@ guard_not_invalidated? i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) - # Will be killed by the backend p15 = getfield_gc(p8, descr=) i17 = arraylen_gc(p15, descr=) - call(_, p8, i15, descr=) # this is a call to _ll_list_resize_ge_trampoline__... + i18 = int_lt(i17, i15) + # a cond call to _ll_list_resize_hint_really_look_inside_iff + cond_call(i18, _, p8, i15, 1, descr=) guard_no_exception(descr=...) p17 = getfield_gc(p8, descr=) setarrayitem_gc(p17, i13, i12, descr=) diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -39,8 +39,8 @@ graph.startblock = newstartblock argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)] graph.signature = Signature(argnames) - # note that we can mostly ignore defaults: if nb_extra_args > 0, - # then defaults aren't applied. if nb_extra_args == 0, then this + # note that we can mostly ignore defaults: if nb_extra_args > 0, + # then defaults aren't applied. if nb_extra_args == 0, then this # just removes the *arg and the defaults keep their meaning. if nb_extra_args > 0: graph.defaults = None # shouldn't be used in this case @@ -66,7 +66,7 @@ if jit_look_inside: access_directly = True key = (AccessDirect, key) - break + break else: new_flags = s_obj.flags.copy() del new_flags['access_directly'] @@ -331,7 +331,7 @@ if key1 is not None: key = key + key1 return funcdesc.cachedgraph(key, builder=builder) - + def specialize_argvalue(funcdesc, args_s, *argindices): from rpython.annotator.model import SomePBC key = [] diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -259,6 +259,23 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart + def _build_cond_call_slowpath(self, supports_floats, callee_only): + """ This builds a general call slowpath, for whatever call happens to + come. + """ + mc = InstrBuilder(self.cpu.cpuinfo.arch_version) + # + self._push_all_regs_to_jitframe(mc, [], self.cpu.supports_floats, callee_only) + ## args are in their respective positions + mc.PUSH([r.ip.value, r.lr.value]) + mc.BLX(r.r4.value) + self._reload_frame_if_necessary(mc) + self._pop_all_regs_from_jitframe(mc, [], supports_floats, + callee_only) + # return + mc.POP([r.ip.value, r.pc.value]) + return mc.materialize(self.cpu.asmmemmgr, []) + def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. 
The arguments are passed in r0 and r10, as follows: diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -301,6 +301,32 @@ self._check_frame_depth_debug(self.mc) return fcond + def cond_call(self, op, gcmap, cond_loc, call_loc, fcond): + assert call_loc is r.r4 + self.mc.TST_rr(cond_loc.value, cond_loc.value) + jmp_adr = self.mc.currpos() + self.mc.BKPT() # patched later + # + self.push_gcmap(self.mc, gcmap, store=True) + # + callee_only = False + floats = False + if self._regalloc is not None: + for reg in self._regalloc.rm.reg_bindings.values(): + if reg not in self._regalloc.rm.save_around_call_regs: + break + else: + callee_only = True + if self._regalloc.vfprm.reg_bindings: + floats = True + cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] + self.mc.BL(cond_call_adr) + self.pop_gcmap(self.mc) + # never any result value + pmc = OverwritingBuilder(self.mc, jmp_adr, WORD) + pmc.B_offs(self.mc.currpos(), c.EQ) # equivalent to 0 as result of TST above + return fcond + def emit_op_jump(self, op, arglocs, regalloc, fcond): target_token = op.getdescr() assert isinstance(target_token, TargetToken) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -164,7 +164,11 @@ def get_scratch_reg(self, type=INT, forbidden_vars=[], selected_reg=None): assert type == INT or type == REF - box = TempBox() + box = None + if type == INT: + box = TempInt() + else: + box = TempPtr() self.temp_boxes.append(box) reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg) @@ -1126,6 +1130,24 @@ prepare_op_cond_call_gc_wb_array = prepare_op_cond_call_gc_wb + def prepare_op_cond_call(self, op, fcond): + assert op.result is None + assert 2 <= op.numargs() <= 4 + 2 + tmpreg = self.get_scratch_reg(INT, selected_reg=r.r4) + v = op.getarg(1) + assert isinstance(v, Const) + imm = self.rm.convert_to_imm(v) + self.assembler.regalloc_mov(imm, tmpreg) + args_so_far = [] + for i in range(2, op.numargs()): + reg = r.argument_regs[i - 2] + arg = op.getarg(i) + self.make_sure_var_in_reg(arg, args_so_far, selected_reg=reg) + args_so_far.append(arg) + loc_cond = self.make_sure_var_in_reg(op.getarg(0), args_so_far) + gcmap = self.get_gcmap([tmpreg]) + self.assembler.cond_call(op, gcmap, loc_cond, tmpreg, fcond) + def prepare_op_force_token(self, op, fcond): # XXX for now we return a regular reg res_loc = self.force_allocate_reg(op.result) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -286,7 +286,7 @@ def get_savedata_ref(self, deadframe): assert deadframe._saved_data is not None return deadframe._saved_data - + # ------------------------------------------------------------ def calldescrof(self, FUNC, ARGS, RESULT, effect_info): @@ -334,7 +334,7 @@ except KeyError: descr = InteriorFieldDescr(A, fieldname) self.descrs[key] = descr - return descr + return descr def _calldescr_dynamic_for_tests(self, atypes, rtype, abiname='FFI_DEFAULT_ABI'): @@ -802,7 +802,7 @@ else: ovf = False self.overflow_flag = ovf - return z + return z def execute_guard_no_overflow(self, descr): if self.overflow_flag: @@ -821,6 +821,12 @@ x = math.sqrt(y) return support.cast_to_floatstorage(x) + def execute_cond_call(self, 
calldescr, cond, func, *args): + if not cond: + return + # cond_call can't have a return value + self.execute_call(calldescr, func, *args) + def execute_call(self, calldescr, func, *args): effectinfo = calldescr.get_extra_info() if effectinfo is not None and hasattr(effectinfo, 'oopspecindex'): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -106,6 +106,10 @@ kind='unicode') else: self.malloc_slowpath_unicode = None + self.cond_call_slowpath = [self._build_cond_call_slowpath(False, False), + self._build_cond_call_slowpath(False, True), + self._build_cond_call_slowpath(True, False), + self._build_cond_call_slowpath(True, True)] self._build_stack_check_slowpath() self._build_release_gil(gc_ll_descr.gcrootmap) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -78,7 +78,7 @@ def _candidate(self, node): return (node.val & 1 == 0) and (node.val + 1 == node.next.val) - + def _pop_two(self, tp): node = self.master_node if node is None or node.next is None: @@ -281,6 +281,7 @@ def __init__(self, longevity, frame_manager=None, assembler=None): self.free_regs = self.all_regs[:] + self.free_regs.reverse() self.longevity = longevity self.temp_boxes = [] if not we_are_translated(): @@ -381,7 +382,7 @@ loc = self.reg_bindings.get(v, None) if loc is not None and loc not in self.no_lower_byte_regs: return loc - for i in range(len(self.free_regs)): + for i in range(len(self.free_regs) - 1, -1, -1): reg = self.free_regs[i] if reg not in self.no_lower_byte_regs: if loc is not None: diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,5 +1,5 @@ from rpython.rlib.rarithmetic import ovfcheck -from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem import llmemory from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -84,9 +84,9 @@ nos.reverse() if self.cpu.backend_name.startswith('x86'): if self.cpu.IS_64_BIT: - assert nos == [11, 12, 31] + assert nos == [0, 1, 31] else: - assert nos == [4, 5, 25] + assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): assert nos == [9, 10, 47] else: @@ -690,6 +690,36 @@ item = rffi.cast(lltype.Ptr(S), frame.jf_frame[gcmap[0]]) assert item == new_items[2] + def test_shadowstack_cond_call(self): + cpu = self.cpu + cpu.gc_ll_descr.init_nursery(100) + cpu.setup_once() + + def check(i, frame): + frame = lltype.cast_opaque_ptr(JITFRAMEPTR, frame) + assert frame.jf_gcmap[0] # is not empty is good enough + + CHECK = lltype.FuncType([lltype.Signed, llmemory.GCREF], lltype.Void) + checkptr = llhelper(lltype.Ptr(CHECK), check) + checkdescr = cpu.calldescrof(CHECK, CHECK.ARGS, CHECK.RESULT, + EffectInfo.MOST_GENERAL) + + loop = self.parse(""" + [i0, p0] + p = force_token() + cond_call(i0, ConstClass(funcptr), i0, p, 
descr=calldescr) + guard_true(i0, descr=faildescr) [p0] + """, namespace={ + 'faildescr': BasicFailDescr(), + 'funcptr': checkptr, + 'calldescr': checkdescr, + }) + token = JitCellToken() + cpu.compile_loop(loop.inputargs, loop.operations, token) + S = self.S + s = lltype.malloc(S) + cpu.execute_token(token, 1, s) + def test_shadowstack_collecting_call_float(self): cpu = self.cpu diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2265,6 +2265,43 @@ value |= 32768 assert s.data.tid == value + def test_cond_call(self): + def func_void(*args): + called.append(args) + + for i in range(5): + called = [] + + FUNC = self.FuncType([lltype.Signed] * i, lltype.Void) + func_ptr = llhelper(lltype.Ptr(FUNC), func_void) + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo.MOST_GENERAL) + + ops = ''' + [i0, i1, i2, i3, i4, i5, i6, f0, f1] + cond_call(i1, ConstClass(func_ptr), %s) + guard_false(i0, descr=faildescr) [i1, i2, i3, i4, i5, i6, f0, f1] + ''' % ', '.join(['i%d' % (j + 2) for j in range(i)] + ["descr=calldescr"]) + loop = parse(ops, namespace={'faildescr': BasicFailDescr(), + 'func_ptr': func_ptr, + 'calldescr': calldescr}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + f1 = longlong.getfloatstorage(1.2) + f2 = longlong.getfloatstorage(3.4) + frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) + assert not called + for j in range(5): + assert self.cpu.get_int_value(frame, j) == j + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 6)) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 7)) == 3.4 + frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, f1, f2) + assert called == [tuple(range(1, i + 1))] + for j in range(4): + assert self.cpu.get_int_value(frame, j + 1) == j + 1 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 6)) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, 7)) == 3.4 + def test_force_operations_returning_void(self): values = [] def maybe_force(token, flag): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -14,7 +14,7 @@ from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - gpr_reg_mgr_cls, xmm_reg_mgr_cls) + gpr_reg_mgr_cls, xmm_reg_mgr_cls, _register_arguments) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, @@ -149,6 +149,34 @@ mc.RET() self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) + def _build_cond_call_slowpath(self, supports_floats, callee_only): + """ This builds a general call slowpath, for whatever call happens to + come. 
+ """ + mc = codebuf.MachineCodeBlockWrapper() + self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) + if IS_X86_64: + mc.SUB(esp, imm(WORD)) + self.set_extra_stack_depth(mc, 2 * WORD) + else: + # we want space for 3 arguments + call + alignment + # the caller is responsible for putting arguments in the right spot + mc.SUB(esp, imm(WORD * 7)) + self.set_extra_stack_depth(mc, 8 * WORD) + for i in range(4): + mc.MOV_sr(i * WORD, _register_arguments[i].value) + mc.CALL(eax) + if IS_X86_64: + mc.ADD(esp, imm(WORD)) + else: + mc.ADD(esp, imm(WORD * 7)) + self.set_extra_stack_depth(mc, 0) + self._reload_frame_if_necessary(mc, align_stack=True) + self._pop_all_regs_from_frame(mc, [], supports_floats, + callee_only) + mc.RET() + return mc.materialize(self.cpu.asmmemmgr, []) + def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. The arguments are passed in eax and edi, as follows: @@ -1729,7 +1757,8 @@ regs = gpr_reg_mgr_cls.all_regs for i, gpr in enumerate(regs): if gpr not in ignored_regs: - mc.MOV_br(i * WORD + base_ofs, gpr.value) + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_br(v * WORD + base_ofs, gpr.value) if withfloats: if IS_X86_64: coeff = 1 @@ -1750,7 +1779,8 @@ regs = gpr_reg_mgr_cls.all_regs for i, gpr in enumerate(regs): if gpr not in ignored_regs: - mc.MOV_rb(gpr.value, i * WORD + base_ofs) + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_rb(gpr.value, v * WORD + base_ofs) if withfloats: # Pop all XMM regs if IS_X86_64: @@ -2131,6 +2161,29 @@ def label(self): self._check_frame_depth_debug(self.mc) + def cond_call(self, op, gcmap, cond_loc, call_loc): + self.mc.TEST(cond_loc, cond_loc) + self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later + jmp_adr = self.mc.get_relative_pos() + self.push_gcmap(self.mc, gcmap, store=True) + callee_only = False + floats = False + if self._regalloc is not None: + for reg in self._regalloc.rm.reg_bindings.values(): + if reg not in self._regalloc.rm.save_around_call_regs: + break + else: + callee_only = True + if self._regalloc.xrm.reg_bindings: + floats = True + cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] + self.mc.CALL(imm(cond_call_adr)) + self.pop_gcmap(self.mc) + # never any result value + offset = self.mc.get_relative_pos() - jmp_adr + assert 0 < offset <= 127 + self.mc.overwrite(jmp_adr-1, chr(offset)) + def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned self.mc.MOV(eax, heap(nursery_free_adr)) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -119,6 +119,9 @@ for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs): gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i +_register_arguments = [edi, esi, edx, ecx] + + class RegAlloc(BaseRegalloc): def __init__(self, assembler, translate_support_code=False): @@ -797,6 +800,25 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb + def consider_cond_call(self, op): + assert op.result is None + args = op.getarglist() + assert 2 <= len(args) <= 4 + 2 + tmpbox = TempBox() + self.rm.force_allocate_reg(tmpbox, selected_reg=eax) + v = args[1] + assert isinstance(v, Const) + imm = self.rm.convert_to_imm(v) + self.assembler.regalloc_mov(imm, eax) + args_so_far = [tmpbox] + for i in range(2, len(args)): + reg = _register_arguments[i - 2] + self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) + 
args_so_far.append(args[i]) + loc_cond = self.make_sure_var_in_reg(args[0], args) + self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) + self.rm.possibly_free_var(tmpbox) + def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) assert isinstance(size_box, ConstInt) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -89,8 +89,8 @@ _location_code = 's' def __init__(self, value, type): - assert value >= 0 - self.value = value + assert value >= 0 # accessing values < 0 is forbidden on x86-32. + self.value = value # (on x86-64 we could allow values down to -128) self.type = type def _getregkey(self): diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -190,7 +190,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None): + extraeffect=None, extradescrs=None): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -248,7 +248,8 @@ # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, - oopspecindex, can_invalidate, call_release_gil_target) + oopspecindex, can_invalidate, call_release_gil_target, + extradescrs) # assert effectinfo is not None if elidable or loopinvariant: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -95,11 +95,13 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extra_descrs=None): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), + frozenset_or_none(extra_descrs), extraeffect, oopspecindex, can_invalidate) @@ -132,6 +134,7 @@ result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex result.call_release_gil_target = call_release_gil_target + result.extra_descrs = extra_descrs if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result @@ -171,7 +174,8 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL): + call_release_gil_target=llmemory.NULL, + extra_descrs=None): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None @@ -220,7 +224,8 @@ extraeffect, oopspecindex, can_invalidate, - call_release_gil_target) + call_release_gil_target, + extra_descrs) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -358,11 +358,13 @@ else: raise AssertionError(kind) lst.append(v) - def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False): + def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False, + oopspecindex=EffectInfo.OS_NONE, extradescrs=None): """A direct_call turns into the 
operation 'residual_call_xxx' if it is calling a function that we don't want to JIT. The initial args of 'residual_call_xxx' are the function to call, and its calldescr.""" - calldescr = self.callcontrol.getcalldescr(op) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex, + extradescrs=extradescrs) op1 = self.rewrite_call(op, 'residual_call', [op.args[0]] + extraargs, calldescr=calldescr) if may_call_jitcodes or self.callcontrol.calldescr_canraise(calldescr): @@ -1339,6 +1341,24 @@ return [] return getattr(self, 'handle_jit_marker__%s' % key)(op, jitdriver) + def rewrite_op_jit_conditional_call(self, op): + have_floats = False + for arg in op.args: + if getkind(arg.concretetype) == 'float': + have_floats = True + break + if len(op.args) > 4 + 2 or have_floats: + raise Exception("Conditional call does not support floats or more than 4 arguments") + callop = SpaceOperation('direct_call', op.args[1:], op.result) + calldescr = self.callcontrol.getcalldescr(callop) + assert not calldescr.get_extra_info().check_forces_virtual_or_virtualizable() + op1 = self.rewrite_call(op, 'conditional_call', + op.args[:2], args=op.args[2:], + calldescr=calldescr) + if self.callcontrol.calldescr_canraise(calldescr): + op1 = [op1, SpaceOperation('-live-', [], None)] + return op1 + def handle_jit_marker__jit_merge_point(self, op, jitdriver): assert self.portal_jd is not None, ( "'jit_merge_point' in non-portal graph!") diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -9,6 +9,7 @@ from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper import rlist +from rpython.rtyper.lltypesystem import rlist as rlist_ll from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.llinterp import LLInterpreter @@ -211,6 +212,7 @@ _ll_5_list_ll_arraycopy = rgc.ll_arraycopy +_ll_3_list_resize_hint_really = rlist_ll._ll_list_resize_hint_really @elidable def _ll_1_gc_identityhash(x): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -4,6 +4,7 @@ from rpython.jit.codewriter.flatten import GraphFlattener, ListOfKind, Register from rpython.jit.codewriter.format import assert_format from rpython.jit.codewriter import longlong +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import AbstractDescr from rpython.rtyper.lltypesystem import lltype, rclass, rstr, rffi from rpython.flowspace.model import SpaceOperation, Variable, Constant @@ -49,7 +50,7 @@ class FakeCPU: class tracker: pass - + def __init__(self, rtyper): rtyper._builtin_func_for_spec_cache = FakeDict() self.rtyper = rtyper @@ -71,7 +72,8 @@ callinfocollection = FakeCallInfoCollection() def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op, oopspecindex=None, extraeffect=None): + def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, + extraeffect=None, extradescrs=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): @@ -80,7 +82,7 @@ pass return FakeDescr(oopspecindex) def calldescr_canraise(self, calldescr): - return calldescr is not self._descr_cannot_raise and calldescr.oopspecindex is None + return 
calldescr is not self._descr_cannot_raise and calldescr.oopspecindex == EffectInfo.OS_NONE def get_vinfo(self, VTYPEPTR): return None @@ -93,7 +95,7 @@ if op.args[0].value._obj._name == 'jit_force_virtual': return 'residual' return 'builtin' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return FakeDescr() def calldescr_canraise(self, calldescr): return False diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -35,7 +35,7 @@ class FakeCPU: class tracker: pass - + rtyper = FakeRTyper() def calldescrof(self, FUNC, ARGS, RESULT): return ('calldescr', FUNC, ARGS, RESULT) @@ -60,7 +60,7 @@ class FakeResidualCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -77,7 +77,7 @@ class FakeResidualIndirectCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def calldescr_canraise(self, calldescr): return True @@ -87,7 +87,7 @@ return 'regular' def graphs_from(self, op): return ['somegraph1', 'somegraph2'] - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return 'calldescr' def get_jitcode(self, graph, called_from=None): assert graph in ('somegraph1', 'somegraph2') diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -250,7 +250,7 @@ class FakeCallControl: def guess_call_kind(self, op): return 'residual' - def getcalldescr(self, op): + def getcalldescr(self, op, **kwds): return FakeDescr() def calldescr_canraise(self, calldescr): return True diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1074,6 +1074,24 @@ def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) + # conditional calls - note that they cannot return stuff + @arguments("cpu", "i", "i", "I", "d") + def bhimpl_conditional_call_i_v(cpu, condition, func, args_i, calldescr): + if condition: + cpu.bh_call_v(func, args_i, None, None, calldescr) + + @arguments("cpu", "i", "i", "I", "R", "d") + def bhimpl_conditional_call_ir_v(cpu, condition, func, args_i, args_r, + calldescr): + if condition: + cpu.bh_call_v(func, args_i, args_r, None, calldescr) + + @arguments("cpu", "i", "i", "I", "R", "F", "d") + def bhimpl_conditional_call_irf_v(cpu, condition, func, args_i, args_r, + args_f, calldescr): + if condition: + cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) + @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -79,6 +79,11 @@ do_call_loopinvariant = do_call do_call_may_force = do_call +def do_cond_call(cpu, metainterp, argboxes, descr): + condbox = argboxes[0] + if condbox.getint(): + do_call(cpu, metainterp, argboxes[1:], descr) + def do_getarrayitem_gc(cpu, _, arraybox, indexbox, 
arraydescr): array = arraybox.getref_base() index = indexbox.getint() diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -106,7 +106,7 @@ rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST): return - if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT: + if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT or opnum == rop.COND_CALL: effectinfo = descr.get_extra_info() ef = effectinfo.extraeffect if (ef == effectinfo.EF_LOOPINVARIANT or diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -264,6 +264,7 @@ return if (opnum == rop.CALL or opnum == rop.CALL_PURE or + opnum == rop.COND_CALL or opnum == rop.CALL_MAY_FORCE or opnum == rop.CALL_RELEASE_GIL or opnum == rop.CALL_ASSEMBLER): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -250,7 +250,7 @@ # This is only safe if the class of the guard_value matches the # class of the guard_*_class, otherwise the intermediate ops might # be executed with wrong classes. - previous_classbox = value.get_constant_class(self.optimizer.cpu) + previous_classbox = value.get_constant_class(self.optimizer.cpu) expected_classbox = self.optimizer.cpu.ts.cls_of_box(op.getarg(1)) assert previous_classbox is not None assert expected_classbox is not None @@ -343,6 +343,15 @@ resvalue = self.getvalue(op.result) self.loop_invariant_results[key] = resvalue + def optimize_COND_CALL(self, op): + arg = op.getarg(0) + val = self.getvalue(arg) + if val.is_constant(): + if val.box.same_constant(CONST_0): + return + op = op.copy_and_change(rop.CALL, args=op.getarglist()[1:]) + self.emit_operation(op) + def _optimize_nullness(self, op, box, expect_nonnull): value = self.getvalue(box) if value.is_nonnull(): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -54,7 +54,7 @@ expected_short = self.parse(expected_short) preamble = self.unroll_and_optimize(loop, call_pure_results) - + # print print "Preamble:" @@ -219,7 +219,7 @@ """ self.optimize_loop(ops, expected) - def test_reverse_of_cast_2(self): + def test_reverse_of_cast_2(self): ops = """ [p0] i1 = cast_ptr_to_int(p0) @@ -1290,7 +1290,7 @@ p30 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p30, i28, descr=nextdescr) setfield_gc(p3, p30, descr=valuedescr) - p46 = same_as(p30) # This same_as should be killed by backend + p46 = same_as(p30) # This same_as should be killed by backend jump(i29, p30, p3) """ expected = """ @@ -2582,7 +2582,7 @@ p2 = new_with_vtable(ConstClass(node_vtable)) setfield_gc(p2, p4, descr=nextdescr) setfield_gc(p1, p2, descr=nextdescr) - i101 = same_as(i4) + i101 = same_as(i4) jump(p1, i2, i4, p4, i101) """ expected = """ @@ -3440,7 +3440,7 @@ setfield_gc(p1, i1, descr=valuedescr) i3 = call_assembler(i1, descr=asmdescr) setfield_gc(p1, i3, descr=valuedescr) - i143 = same_as(i3) # Should be killed by backend + i143 = same_as(i3) # Should be killed by backend jump(p1, i4, i3) ''' 
self.optimize_loop(ops, ops, preamble) @@ -3551,7 +3551,7 @@ escape(i2) i4 = call(123456, 4, i0, 6, descr=plaincalldescr) guard_no_exception() [] - i155 = same_as(i4) + i155 = same_as(i4) jump(i0, i4, i155) ''' expected = ''' @@ -6000,7 +6000,7 @@ [p1, i1, i2, i3] escape(i3) i4 = int_sub(i2, i1) - i5 = same_as(i4) + i5 = same_as(i4) jump(p1, i1, i2, i4, i5) """ expected = """ @@ -7258,7 +7258,7 @@ [i0] i2 = int_lt(i0, 10) guard_true(i2) [] - i1 = int_add(i0, 1) + i1 = int_add(i0, 1) jump(i1) """ self.optimize_loop(ops, expected) @@ -7976,7 +7976,7 @@ jump(i0, p0, i2) """ self.optimize_loop(ops, expected) - + def test_constant_failargs(self): ops = """ [p1, i2, i3] @@ -8057,7 +8057,7 @@ jump() """ self.optimize_loop(ops, expected) - + def test_issue1080_infinitie_loop_simple(self): ops = """ @@ -8089,8 +8089,8 @@ def test_licm_boxed_opaque_getitem(self): ops = """ [p1] - p2 = getfield_gc(p1, descr=nextdescr) - mark_opaque_ptr(p2) + p2 = getfield_gc(p1, descr=nextdescr) + mark_opaque_ptr(p2) guard_class(p2, ConstClass(node_vtable)) [] i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) @@ -8106,8 +8106,8 @@ def test_licm_boxed_opaque_getitem_unknown_class(self): ops = """ [p1] - p2 = getfield_gc(p1, descr=nextdescr) - mark_opaque_ptr(p2) + p2 = getfield_gc(p1, descr=nextdescr) + mark_opaque_ptr(p2) i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p1) @@ -8123,7 +8123,7 @@ def test_licm_unboxed_opaque_getitem(self): ops = """ [p2] - mark_opaque_ptr(p2) + mark_opaque_ptr(p2) guard_class(p2, ConstClass(node_vtable)) [] i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) @@ -8139,22 +8139,20 @@ def test_licm_unboxed_opaque_getitem_unknown_class(self): ops = """ [p2] - mark_opaque_ptr(p2) + mark_opaque_ptr(p2) i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p2) """ expected = """ [p2] - i3 = getfield_gc(p2, descr=otherdescr) + i3 = getfield_gc(p2, descr=otherdescr) i4 = call(i3, descr=nonwritedescr) jump(p2) """ self.optimize_loop(ops, expected) - - - def test_only_strengthen_guard_if_class_matches(self): + def test_only_strengthen_guard_if_class_matches_2(self): ops = """ [p1] guard_class(p1, ConstClass(node_vtable2)) [] @@ -8164,6 +8162,30 @@ self.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_cond_call_with_a_constant(self): + ops = """ + [p1] + cond_call(1, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + call(123, p1, descr=plaincalldescr) + jump(p1) + """ + self.optimize_loop(ops, expected) + + def test_cond_call_with_a_constant_2(self): + ops = """ + [p1] + cond_call(0, 123, p1, descr=plaincalldescr) + jump(p1) + """ + expected = """ + [p1] + jump(p1) + """ + self.optimize_loop(ops, expected) class TestLLtype(OptimizeOptTest, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -856,6 +856,21 @@ opimpl_residual_call_irf_f = _opimpl_residual_call3 opimpl_residual_call_irf_v = _opimpl_residual_call3 + @arguments("box", "box", "boxes", "descr", "orgpc") + def opimpl_conditional_call_i_v(self, condbox, funcbox, argboxes, calldescr, + pc): + self.do_conditional_call(condbox, funcbox, argboxes, calldescr, pc) + + @arguments("box", "box", "boxes2", "descr", "orgpc") + def opimpl_conditional_call_ir_v(self, condbox, funcbox, argboxes, + calldescr, pc): + self.do_conditional_call(condbox, funcbox, argboxes, calldescr, pc) + + 
@arguments("box", "box", "boxes3", "descr", "orgpc") + def opimpl_conditional_call_irf_v(self, condbox, funcbox, argboxes, + calldescr, pc): + self.do_conditional_call(condbox, funcbox, argboxes, calldescr, pc) + @arguments("int", "boxes3", "boxes3", "orgpc") def _opimpl_recursive_call(self, jdindex, greenboxes, redboxes, pc): targetjitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] @@ -1204,7 +1219,7 @@ CIF_DESCRIPTION_P) kind, descr, itemsize = get_arg_descr(self.metainterp.cpu, cif_description.rtype) - + if kind != 'v': ofs = cif_description.exchange_result assert ofs % itemsize == 0 # alignment check (result) @@ -1339,12 +1354,7 @@ self.metainterp.assert_no_exception() return resbox - def do_residual_call(self, funcbox, argboxes, descr, pc, - assembler_call=False, - assembler_call_jd=None): - # First build allboxes: it may need some reordering from the - # list provided in argboxes, depending on the order in which - # the arguments are expected by the function + def _build_allboxes(self, funcbox, argboxes, descr): allboxes = [None] * (len(argboxes)+1) allboxes[0] = funcbox src_i = src_r = src_f = 0 @@ -1373,7 +1383,16 @@ allboxes[i] = box i += 1 assert i == len(allboxes) + return allboxes + + def do_residual_call(self, funcbox, argboxes, descr, pc, + assembler_call=False, + assembler_call_jd=None): + # First build allboxes: it may need some reordering from the + # list provided in argboxes, depending on the order in which + # the arguments are expected by the function # + allboxes = self._build_allboxes(funcbox, argboxes, descr) effectinfo = descr.get_extra_info() if (assembler_call or effectinfo.check_forces_virtual_or_virtualizable()): @@ -1413,6 +1432,15 @@ pure = effectinfo.check_is_elidable() return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + def do_conditional_call(self, condbox, funcbox, argboxes, descr, pc): + allboxes = self._build_allboxes(funcbox, argboxes, descr) + effectinfo = descr.get_extra_info() + assert not effectinfo.check_forces_virtual_or_virtualizable() + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() + return self.execute_varargs(rop.COND_CALL, [condbox] + allboxes, descr, + exc, pure) + def _do_jit_force_virtual(self, allboxes, descr, pc): assert len(allboxes) == 2 if (self.metainterp.jitdriver_sd.virtualizable_info is None and diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -513,6 +513,7 @@ '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', 'CALL/*d', + 'COND_CALL/*d', # a conditional call, with first argument as a condition 'CALL_ASSEMBLER/*d', # call already compiled assembler 'CALL_MAY_FORCE/*d', 'CALL_LOOPINVARIANT/*d', diff --git a/rpython/jit/metainterp/test/test_call.py b/rpython/jit/metainterp/test/test_call.py --- a/rpython/jit/metainterp/test/test_call.py +++ b/rpython/jit/metainterp/test/test_call.py @@ -24,10 +24,10 @@ res = self.interp_operations(f, [3]) assert res == f(3) - + def test_call_elidable_none(self): d = {} - + @jit.elidable def f(a): return d.get(a, None) @@ -43,3 +43,16 @@ return 3 self.meta_interp(main, [10]) + + def test_cond_call(self): + def f(l, n): + l.append(n) + + def main(n): + l = [] + jit.conditional_call(n == 10, f, l, n) + return len(l) + + assert self.interp_operations(main, [10]) == 1 + assert self.interp_operations(main, [5]) == 0 + diff --git a/rpython/jit/metainterp/test/test_list.py 
b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -315,4 +315,34 @@ assert res == f(37) # There is the one actual field on a, plus several fields on the list # itself - self.check_resops(getfield_gc=10) + self.check_resops(getfield_gc=7) + + def test_conditional_call_append(self): + jitdriver = JitDriver(greens = [], reds = 'auto') + + def f(n): + l = [] + while n > 0: + jitdriver.jit_merge_point() + l.append(n) + n -= 1 + return len(l) + + res = self.meta_interp(f, [10]) + assert res == 10 + self.check_resops(call=0, cond_call=2) + + def test_conditional_call_pop(self): + jitdriver = JitDriver(greens = [], reds = 'auto') + + def f(n): + l = range(n) + while n > 0: + jitdriver.jit_merge_point() + l.pop() + n -= 1 + return len(l) + + res = self.meta_interp(f, [10]) + assert res == 0 + self.check_resops(call=0, cond_call=2) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -990,6 +990,33 @@ return hop.genop('jit_record_known_class', [v_inst, v_cls], resulttype=lltype.Void) +def _jit_conditional_call(condition, function, *args): + pass + + at specialize.ll_and_arg(1) +def conditional_call(condition, function, *args): + if we_are_jitted(): + _jit_conditional_call(condition, function, *args) + else: + if condition: + function(*args) + +class ConditionalCallEntry(ExtRegistryEntry): + _about_ = _jit_conditional_call + + def compute_result_annotation(self, *args_s): + self.bookkeeper.emulate_pbc_call(self.bookkeeper.position_key, + args_s[1], args_s[2:]) + + def specialize_call(self, hop): + from rpython.rtyper.lltypesystem import lltype + + args_v = hop.inputargs(lltype.Bool, lltype.Void, *hop.args_r[2:]) + args_v[1] = hop.args_r[1].get_concrete_llfn(hop.args_s[1], + hop.args_s[2:], hop.spaceop) + hop.exception_is_here() + return hop.genop('jit_conditional_call', args_v) + class Counters(object): counters=""" TRACING diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -529,6 +529,9 @@ def op_jit_record_known_class(self, *args): pass + def op_jit_conditional_call(self, *args): + raise NotImplementedError("should not be called while not jitted") + def op_get_exception_addr(self, *args): raise NotImplementedError diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -446,6 +446,7 @@ 'jit_force_quasi_immutable': LLOp(canrun=True), 'jit_record_known_class' : LLOp(canrun=True), 'jit_ffi_save_result': LLOp(canrun=True), + 'jit_conditional_call': LLOp(), 'get_exception_addr': LLOp(), 'get_exc_value_addr': LLOp(), 'do_malloc_fixedsize_clear':LLOp(canmallocgc=True), diff --git a/rpython/rtyper/lltypesystem/rlist.py b/rpython/rtyper/lltypesystem/rlist.py --- a/rpython/rtyper/lltypesystem/rlist.py +++ b/rpython/rtyper/lltypesystem/rlist.py @@ -175,6 +175,7 @@ # adapted C code + at jit.look_inside_iff(lambda l, newsize, overallocate: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) @signature(types.any(), types.int(), types.bool(), returns=types.none()) def _ll_list_resize_hint_really(l, newsize, overallocate): """ @@ -217,7 +218,7 @@ rgc.ll_arraycopy(items, newitems, 0, 0, p) l.items = newitems - at jit.dont_look_inside + at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) 
def _ll_list_resize_hint(l, newsize): """Ensure l.items has room for at least newsize elements without setting l.length to newsize. @@ -231,7 +232,6 @@ if allocated < newsize or newsize < (allocated >> 1) - 5: _ll_list_resize_hint_really(l, newsize, False) - @signature(types.any(), types.int(), types.bool(), returns=types.none()) def _ll_list_resize_really(l, newsize, overallocate): """ @@ -252,30 +252,34 @@ _ll_list_resize_really(l, newsize, False) - at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) - at jit.oopspec("list._resize_ge(l, newsize)") def _ll_list_resize_ge(l, newsize): """This is called with 'newsize' larger than the current length of the list. If the list storage doesn't have enough space, then really perform a realloc(). In the common case where we already overallocated enough, then this is a very fast operation. """ - if len(l.items) >= newsize: - l.length = newsize + cond = len(l.items) < newsize + if jit.isconstant(len(l.items)) and jit.isconstant(newsize): + if cond: + _ll_list_resize_hint_really(l, newsize, True) else: - _ll_list_resize_really(l, newsize, True) + jit.conditional_call(cond, + _ll_list_resize_hint_really, l, newsize, True) + l.length = newsize - at jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize)) - at jit.oopspec("list._resize_le(l, newsize)") def _ll_list_resize_le(l, newsize): """This is called with 'newsize' smaller than the current length of the list. If 'newsize' falls lower than half the allocated size, proceed with the realloc() to shrink the list. """ - if newsize >= (len(l.items) >> 1) - 5: - l.length = newsize + cond = newsize < (len(l.items) >> 1) - 5 + if jit.isconstant(len(l.items)) and jit.isconstant(newsize): + if cond: + _ll_list_resize_hint_really(l, newsize, False) else: - _ll_list_resize_really(l, newsize, False) + jit.conditional_call(cond, _ll_list_resize_hint_really, l, newsize, + False) + l.length = newsize def ll_append_noresize(l, newitem): length = l.length diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -283,9 +283,13 @@ assert len(self.s_pbc.descriptions) == 1 # lowleveltype wouldn't be Void otherwise funcdesc, = self.s_pbc.descriptions - if len(self.callfamily.calltables) != 1: + tables = [] # find the simple call in the calltable + for key, table in self.callfamily.calltables.items(): + if not key[1] and not key[2] and not key[3]: + tables.append(table) + if len(tables) != 1: raise TyperError("cannot pass a function with various call shapes") - table, = self.callfamily.calltables.values() + table, = tables graphs = [] for row in table: if funcdesc in row: @@ -298,6 +302,18 @@ llfn = self.rtyper.getcallable(graph) return inputconst(typeOf(llfn), llfn) + def get_concrete_llfn(self, s_pbc, args_s, op): + bk = self.rtyper.annotator.bookkeeper + descs = list(s_pbc.descriptions) + vfcs = description.FunctionDesc.variant_for_call_site + args = bk.build_args("simple_call", args_s) + shape, index = vfcs(bk, self.callfamily, descs, args, op) + funcdesc, = descs + row_of_one_graph = self.callfamily.calltables[shape][index] + graph = row_of_one_graph[funcdesc] + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + def rtype_simple_call(self, hop): return self.call('simple_call', hop) diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py --- 
a/rpython/translator/backendopt/test/test_all.py +++ b/rpython/translator/backendopt/test/test_all.py @@ -47,6 +47,8 @@ def translateopt(self, func, sig, **optflags): t = TranslationContext() + opts = {'translation.list_comprehension_operations': True} + t.config.set(**opts) t.buildannotator().build_types(func, sig) t.buildrtyper(type_system=self.type_system).specialize() if option.view: diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -29,7 +29,7 @@ __slots__ = """graph db gcpolicy exception_policy more_ll_values - vars all_cached_consts + vars all_cached_consts illtypes functionname blocknum @@ -59,7 +59,7 @@ if isinstance(T, Ptr) and T.TO.__class__ == ForwardReference: continue db.gettype(T) # force the type to be considered by the database - + self.illtypes = None def collect_var_and_types(self): @@ -90,7 +90,7 @@ for cleanupop in exc_cleanup_ops: mix.extend(cleanupop.args) mix.append(cleanupop.result) - + uniquemix = [] seen = identity_dict() for v in mix: @@ -454,6 +454,9 @@ fnexpr = '((%s)%s)' % (cdecl(typename, ''), self.expr(fnaddr)) return self.generic_call(FUNC, fnexpr, op.args[1:], op.result) + def OP_JIT_CONDITIONAL_CALL(self, op): + return 'abort(); /* jit_conditional_call */' + # low-level operations def generic_get(self, op, sourceexpr): T = self.lltypemap(op.result) @@ -580,7 +583,7 @@ def OP_PTR_ISZERO(self, op): return '%s = (%s == NULL);' % (self.expr(op.result), self.expr(op.args[0])) - + def OP_PTR_EQ(self, op): return '%s = (%s == %s);' % (self.expr(op.result), self.expr(op.args[0]), @@ -627,7 +630,7 @@ ARRAY = self.lltypemap(op.args[0]).TO if ARRAY._hints.get("render_as_void"): return '%s = (char *)%s + %s;' % ( - self.expr(op.result), + self.expr(op.result), self.expr(op.args[0]), self.expr(op.args[1])) else: @@ -652,7 +655,7 @@ def OP_CAST_INT_TO_PTR(self, op): TYPE = self.lltypemap(op.result) typename = self.db.gettype(TYPE) - return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""), + return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""), self.expr(op.args[0])) def OP_SAME_AS(self, op): @@ -711,7 +714,7 @@ val = "(unsigned char)%s" % val elif ORIG is UniChar: val = "(unsigned long)%s" % val - typename = cdecl(self.db.gettype(TYPE), '') + typename = cdecl(self.db.gettype(TYPE), '') return "%(result)s = (%(typename)s)(%(val)s);" % locals() OP_FORCE_CAST = OP_CAST_PRIMITIVE # xxx the same logic works @@ -823,7 +826,7 @@ counter_label+1) counter_label = self.expr(op.args[1]) return 'PYPY_INSTRUMENT_COUNT(%s);' % counter_label - + def OP_IS_EARLY_CONSTANT(self, op): return '%s = 0; /* IS_EARLY_CONSTANT */' % (self.expr(op.result),) diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py new file mode 100644 --- /dev/null +++ b/rpython/translator/goal/targetjitstandalone.py @@ -0,0 +1,44 @@ + +""" Only the JIT +""" + +from rpython.rlib import jit +from rpython.jit.codewriter.policy import JitPolicy + +driver = jit.JitDriver(greens = [], reds = 'auto') +driver2 = jit.JitDriver(greens = [], reds = 'auto') + +def main(count): + i = 0 + l = [] + while i < count: + driver.jit_merge_point() + l.append(i) + i += 1 + l = main2(l, count) + return l + +def main2(l, count): + i = 0 + while i < count: + driver2.jit_merge_point() + l.pop() + i += 1 + return l + +def entry_point(argv): + if len(argv) < 3: + print "Usage: jitstandalone " + count1 = int(argv[1]) + count2 = int(argv[2]) + s = 0 + for i 
in range(count1): + s += len(main(count2)) + print s + return 0 + +def target(*args): + return entry_point, None + +def jitpolicy(driver): + return JitPolicy() From noreply at buildbot.pypy.org Sun Jul 28 21:37:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 21:37:22 +0200 (CEST) Subject: [pypy-commit] pypy default: fixes Message-ID: <20130728193722.95B8E1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65752:110dab59f002 Date: 2013-07-28 21:36 +0200 http://bitbucket.org/pypy/pypy/changeset/110dab59f002/ Log: fixes diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -7,11 +7,18 @@ import py, os +try: + py.test.config.option.runappdirect +except AttributeError: + import sys + print >> sys.stderr, __doc__ + sys.exit(2) + from pypy.objspace.std import Space from rpython.config.translationoption import set_opt_level from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level from pypy.objspace.std import multimethod -from rpython.rtyper.annlowlevel import llhelper, llstr, oostr, hlstr +from rpython.rtyper.annlowlevel import llhelper, llstr, hlstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype from pypy.interpreter.pycode import PyCode From noreply at buildbot.pypy.org Sun Jul 28 21:39:28 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 21:39:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove some parameters that became unused over the lfietime of this branch. Message-ID: <20130728193928.283F51C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65753:c454418f5a34 Date: 2013-07-28 12:37 -0700 http://bitbucket.org/pypy/pypy/changeset/c454418f5a34/ Log: Remove some parameters that became unused over the lfietime of this branch. diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -190,7 +190,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None, extradescrs=None): + extraeffect=None): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. 
It gets an effectinfo @@ -249,7 +249,7 @@ effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, - extradescrs) + ) # assert effectinfo is not None if elidable or loopinvariant: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -95,13 +95,11 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, - extra_descrs=None): + call_release_gil_target=llmemory.NULL): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), - frozenset_or_none(extra_descrs), extraeffect, oopspecindex, can_invalidate) @@ -134,7 +132,6 @@ result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex result.call_release_gil_target = call_release_gil_target - result.extra_descrs = extra_descrs if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result @@ -174,8 +171,7 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, - extra_descrs=None): + call_release_gil_target=llmemory.NULL): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None @@ -224,8 +220,7 @@ extraeffect, oopspecindex, can_invalidate, - call_release_gil_target, - extra_descrs) + call_release_gil_target) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -359,12 +359,11 @@ lst.append(v) def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False, - oopspecindex=EffectInfo.OS_NONE, extradescrs=None): + oopspecindex=EffectInfo.OS_NONE): """A direct_call turns into the operation 'residual_call_xxx' if it is calling a function that we don't want to JIT. 
The initial args of 'residual_call_xxx' are the function to call, and its calldescr.""" - calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex, - extradescrs=extradescrs) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex) op1 = self.rewrite_call(op, 'residual_call', [op.args[0]] + extraargs, calldescr=calldescr) if may_call_jitcodes or self.callcontrol.calldescr_canraise(calldescr): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None, extradescrs=None): + extraeffect=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): From noreply at buildbot.pypy.org Sun Jul 28 21:39:29 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 21:39:29 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130728193929.70C7A1C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65754:fe9ac340d631 Date: 2013-07-28 12:38 -0700 http://bitbucket.org/pypy/pypy/changeset/fe9ac340d631/ Log: merged upstream diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -7,11 +7,18 @@ import py, os +try: + py.test.config.option.runappdirect +except AttributeError: + import sys + print >> sys.stderr, __doc__ + sys.exit(2) + from pypy.objspace.std import Space from rpython.config.translationoption import set_opt_level from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level from pypy.objspace.std import multimethod -from rpython.rtyper.annlowlevel import llhelper, llstr, oostr, hlstr +from rpython.rtyper.annlowlevel import llhelper, llstr, hlstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype from pypy.interpreter.pycode import PyCode From noreply at buildbot.pypy.org Sun Jul 28 21:44:49 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 21:44:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed some more references to ootypesystem. Message-ID: <20130728194449.4E3F11C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65755:1cada6d5b7b2 Date: 2013-07-28 12:44 -0700 http://bitbucket.org/pypy/pypy/changeset/1cada6d5b7b2/ Log: Removed some more references to ootypesystem. 
diff --git a/rpython/rtyper/test/test_rvirtualizable2.py b/rpython/rtyper/test/test_rvirtualizable2.py --- a/rpython/rtyper/test/test_rvirtualizable2.py +++ b/rpython/rtyper/test/test_rvirtualizable2.py @@ -57,7 +57,7 @@ block = graph.startblock op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -72,7 +72,7 @@ block = graph.startblock op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -86,7 +86,7 @@ block = graph.startblock op_getfield = block.operations[-1] op_call = block.operations[-2] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' assert op_call.opname == 'direct_call' # to V.__init__ def test_generate_force_virtualizable_array(self): @@ -99,7 +99,7 @@ op_getfield = block.operations[-2] op_getarrayitem = block.operations[-1] assert op_getarrayitem.opname == 'direct_call' # to ll_getitem_xxx - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -168,13 +168,13 @@ _, rtyper, graph = self.gengraph(fn, [int]) block = graph.startblock op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' funcptr = self.replace_force_virtualizable(rtyper, [graph]) if getattr(option, 'view', False): graph.show() op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' assert op_promote.opname == 'direct_call' assert op_promote.args[0].value == funcptr assert op_promote.args[1] == op_getfield.args[0] From noreply at buildbot.pypy.org Sun Jul 28 21:51:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 21:51:27 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: hg merge default Message-ID: <20130728195127.1D1DC1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65756:864d702ea2fd Date: 2013-07-28 21:48 +0200 http://bitbucket.org/pypy/pypy/changeset/864d702ea2fd/ Log: hg merge default diff too long, truncating to 2000 out of 47771 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. 
Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? 
+ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. 
vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. 
- -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. 
There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. 
- -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. 
- - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. 
- -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. 
- -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -85,14 +85,9 @@ from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ @@ -123,7 +118,6 @@ .. _`testing methods`: coding-guide.html#testing-in-pypy .. _`translation`: translation.html .. 
_`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. _JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). 
The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). - -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. 
result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. 
int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. 
It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. - -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. 
- Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. **This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -84,12 +84,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -3,14 +3,20 @@ =============== We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. -This beta does not add any new features to the 2.1 release, but contains several bugfixes listed below. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html Highlights ========== +* Support for os.statvfs and os.fstatvfs on unix systems. + * Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). -* Fixed issue `1552`_: GreenletExit should inherit from BaseException +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. 
* Fixed issue `1537`_: numpypy __array_interface__ diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. - - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. - -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. - -As usual, it is possible to convert an object from type to type only -under certain conditions; there is a number of predefined conversions -between primitive types such as from ``Bool`` to ``Signed`` or from -``Signed`` to ``Float``. For each one of these conversions there is a -corresponding low level operation, such as ``cast_bool_to_int`` and -``cast_int_to_float``. - -Moreover it is possible to cast instances of a class up and down the -inheritance hierarchy with the ``ooupcast`` and ``oodowncast`` low -level operations. Implicit upcasting is not allowed, so you really -need to do a ``ooupcast`` for converting from a subclass to a -superclass. 
- -With this design statically typed backends can trivially insert -appropriate casts when needed, while dynamically typed backends can -simply ignore some of the operation such as ``ooupcast`` and -``oodowncast``. Backends that supports implicit upcasting, such as CLI -and Java, can simply ignore only ``ooupcast``. - -Object model -++++++++++++ - -The object model implemented by ootype is quite Java-like. The -following is a list of key features of the ootype object model which -have a direct correspondence in the Java or .NET object model: - - - classes have a static set of strongly typed methods and - attributes; - - - methods can be overriden in subclasses; every method is "virtual" - (i.e., can be overridden); methods can be "abstract" (i.e., need - to be overridden in subclasses); - - - classes support single inheritance; all classes inherit directly - or indirectly from the ROOT class; - - - there is some support for method overloading. This feature is not - used by the RTyper itself because RPython doesn't support method - overloading, but it is used by the GenCLI backend for offering - access to the native .NET libraries (see XXX); - - - all classes, attributes and methods are public: ootype is only - used internally by the translator, so there is no need to enforce - accessibility rules; - - - classes and functions are first-class order objects: this feature - can be easily simulated by backends for platforms on which it is not - a native feature; - - - there is a set of `built-in types`_ offering standard features. - -Exception handling -++++++++++++++++++ - -Since flow graphs are meant to be used also for very low level -backends such as C, they are quite unstructured: this means that the -target platform doesn't need to have a native exception handling -mechanism, since at the very least the backend can handle exceptions -just like ``genc`` does. - -By contrast we know that most of high level platforms natively support -exception handling, so *ootypesystem* is designed to let them to use -it. In particular the exception instances are typed with the -``Instance`` type, so the usual inheritance exception hierarchy is -preserved and the native way to catch exception should just work. - -.. `built-in types`_ - -Built-in types -++++++++++++++ - -It seems reasonable to assume high level platforms to provide built-in -facilities for common types such as *lists* or *hashtables*. - -RPython standard types such as ``List`` and ``Dict`` are implemented -on top of these common types; at the moment of writing there are six -built-in types: - - - **String**: self-descriptive - - - **StringBuilder**: used for dynamic building of string - - - **List**: a variable-sized, homogeneous list of object - - - **Dict**: a hashtable of homogeneous keys and values - - - **CustomDict**: same as dict, but with custom equal and hash - functions - - - **DictItemsIterator**: a helper class for iterating over the - elements of a ``Dict`` - - -Each of these types is a subtype of ``BuiltinADTType`` and has set of -ADT (Abstract Data Type) methods (hence the name of the base class) -for being manipulated. Examples of ADT methods are ``ll_length`` for -``List`` and ``ll_get`` for ``Dict``. - -From the backend point of view an instance of a built-in types is -treated exactly as a plain ``Instance``, so usually no special-casing -is needed. The backend is supposed to provide a bunch of classes -wrapping the native ones in order to provide the right signature and -semantic for the ADT methods. 
- -As an alternative, backends can special-case the ADT types to map them -directly to the native equivalent, translating the method names -on-the-fly at compile time. - -Generics -++++++++ - -Some target platforms offer native support for **generics**, i.e. -classes that can be parametrized on types, not only values. For -example, if one wanted to create a list using generics, a possible -declaration would be to say ``List``, where ``T`` represented the -type. When instantiated, one could create ``List`` or -``List``. The list is then treated as a list of whichever type -is specified. - -Each subclass of ``BuiltinADTTypes`` defines a bunch of type -parameters by creating some class level placeholder in the form of -``PARAMNAME_T``; then it fills up the ``_GENERIC_METHODS`` attribute -by defining the signature of each of the ADT methods using those -placeholders in the appropriate places. As an example, here is an -extract of *ootypesystem*'s List type:: - - class List(BuiltinADTType): - # placeholders for types - SELFTYPE_T = object() - ITEMTYPE_T = object() - - ... - - def _init_methods(self): - # 'ITEMTYPE_T' is used as a placeholder for indicating - # arguments that should have ITEMTYPE type. 'SELFTYPE_T' indicates 'self' - - self._GENERIC_METHODS = frozendict({ - "ll_length": Meth([], Signed), - "ll_getitem_fast": Meth([Signed], self.ITEMTYPE_T), - "ll_setitem_fast": Meth([Signed, self.ITEMTYPE_T], Void), - "_ll_resize_ge": Meth([Signed], Void), - "_ll_resize_le": Meth([Signed], Void), - "_ll_resize": Meth([Signed], Void), - }) - - ... - -Thus backends that support generics can simply look for placeholders -for discovering where the type parameters are used. Backends that -don't support generics can simply use the ``Root`` class instead and -insert the appropriate casts where needed. Note that placeholders -might also stand for primitive types, which typically require more -involved casts: e.g. in Java, making wrapper objects around ints. - - HighLevelOp interface --------------------- diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst From noreply at buildbot.pypy.org Sun Jul 28 21:51:40 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 21:51:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed yet more code and docs that were ootype specific. Message-ID: <20130728195140.06A901C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65757:deab847b00d8 Date: 2013-07-28 12:51 -0700 http://bitbucket.org/pypy/pypy/changeset/deab847b00d8/ Log: Removed yet more code and docs that were ootype specific. diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. 
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,20 +65,17 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code @@ -93,31 +90,31 @@ `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. 
_`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. _JIT: jit/index.html diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -559,17 +559,6 @@ interpret_raises(IndexError, raise_exception, [42]) interpret_raises(ValueError, raise_exception, [43]) -By default the ``interpret`` and ``interpret_raises`` functions use -the low-level typesystem. If you want to use the object oriented one -you have to set the ``type_system`` parameter to the string -``'ootype'``:: - - def test_invert(): - def f(x): - return ~x - res = interpret(f, [3], type_system='ootype') - assert res == ~3 - .. _annotator: translation.html#the-annotation-pass .. include:: _ref.txt diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -2108,8 +2108,7 @@ def identityhash(p): """Returns the lltype-level hash of the given GcStruct. - Also works with most ootype objects. Not for NULL. - See rlib.objectmodel.compute_identity_hash() for more + Not for NULL. See rlib.objectmodel.compute_identity_hash() for more information about the RPython-level meaning of this. """ assert p diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -503,9 +503,7 @@ return l -# return a nullptr() if lst is a list of pointers it, else None. Note -# that if we are using ootypesystem there are not pointers, so we -# always return None. +# return a nullptr() if lst is a list of pointers it, else None. def ll_null_item(lst): LIST = typeOf(lst) if isinstance(LIST, Ptr): @@ -516,25 +514,15 @@ def listItemType(lst): LIST = typeOf(lst) - if isinstance(LIST, Ptr): # lltype - LIST = LIST.TO - return LIST.ITEM + return LIST.TO.ITEM @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) def ll_arraycopy(source, dest, source_start, dest_start, length): SRCTYPE = typeOf(source) - if isinstance(SRCTYPE, Ptr): - # lltype - rgc.ll_arraycopy(source.ll_items(), dest.ll_items(), - source_start, dest_start, length) - else: - # ootype -- XXX improve the case of array->array copy? 
- i = 0 - while i < length: - item = source.ll_getitem_fast(source_start + i) - dest.ll_setitem_fast(dest_start + i, item) - i += 1 + # lltype + rgc.ll_arraycopy(source.ll_items(), dest.ll_items(), + source_start, dest_start, length) def ll_copy(RESLIST, l): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -757,9 +757,7 @@ # get flowed and annotated, mostly with SomePtr. # -# this class contains low level helpers used both by lltypesystem and -# ootypesystem; each typesystem should subclass it and add its own -# primitives. +# this class contains low level helpers used both by lltypesystem class AbstractLLHelpers: __metaclass__ = StaticMethods From noreply at buildbot.pypy.org Sun Jul 28 21:53:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 21:53:08 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: hg merge default Message-ID: <20130728195308.275451C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65758:cd29aa0e80ae Date: 2013-07-28 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/cd29aa0e80ae/ Log: hg merge default diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,20 +65,17 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code @@ -93,31 +90,31 @@ `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. _JIT: jit/index.html diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. 
toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -559,17 +559,6 @@ interpret_raises(IndexError, raise_exception, [42]) interpret_raises(ValueError, raise_exception, [43]) -By default the ``interpret`` and ``interpret_raises`` functions use -the low-level typesystem. If you want to use the object oriented one -you have to set the ``type_system`` parameter to the string -``'ootype'``:: - - def test_invert(): - def f(x): - return ~x - res = interpret(f, [3], type_system='ootype') - assert res == ~3 - .. _annotator: translation.html#the-annotation-pass .. include:: _ref.txt diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -190,7 +190,7 @@ return (fnaddr, calldescr) def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None, extradescrs=None): + extraeffect=None): """Return the calldescr that describes all calls done by 'op'. This returns a calldescr that we can put in the corresponding call operation in the calling jitcode. It gets an effectinfo @@ -249,7 +249,7 @@ effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, - extradescrs) + ) # assert effectinfo is not None if elidable or loopinvariant: diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -96,13 +96,11 @@ extraeffect=EF_CAN_RAISE, oopspecindex=OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, - extra_descrs=None): + call_release_gil_target=llmemory.NULL): key = (frozenset_or_none(readonly_descrs_fields), frozenset_or_none(readonly_descrs_arrays), frozenset_or_none(write_descrs_fields), frozenset_or_none(write_descrs_arrays), - frozenset_or_none(extra_descrs), extraeffect, oopspecindex, can_invalidate) @@ -135,7 +133,6 @@ result.can_invalidate = can_invalidate result.oopspecindex = oopspecindex result.call_release_gil_target = call_release_gil_target - result.extra_descrs = extra_descrs if result.check_can_raise(): assert oopspecindex in cls._OS_CANRAISE cls._cache[key] = result @@ -178,8 +175,7 @@ extraeffect=EffectInfo.EF_CAN_RAISE, oopspecindex=EffectInfo.OS_NONE, can_invalidate=False, - call_release_gil_target=llmemory.NULL, - extra_descrs=None): + call_release_gil_target=llmemory.NULL): from rpython.translator.backendopt.writeanalyze import top_set if effects is top_set or extraeffect == EffectInfo.EF_RANDOM_EFFECTS: readonly_descrs_fields = None @@ -228,8 +224,7 @@ extraeffect, oopspecindex, can_invalidate, - call_release_gil_target, - extra_descrs) + call_release_gil_target) def consider_struct(TYPE, fieldname): if fieldType(TYPE, fieldname) is lltype.Void: diff --git a/rpython/jit/codewriter/jtransform.py 
b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -359,12 +359,11 @@ lst.append(v) def handle_residual_call(self, op, extraargs=[], may_call_jitcodes=False, - oopspecindex=EffectInfo.OS_NONE, extradescrs=None): + oopspecindex=EffectInfo.OS_NONE): """A direct_call turns into the operation 'residual_call_xxx' if it is calling a function that we don't want to JIT. The initial args of 'residual_call_xxx' are the function to call, and its calldescr.""" - calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex, - extradescrs=extradescrs) + calldescr = self.callcontrol.getcalldescr(op, oopspecindex=oopspecindex) op1 = self.rewrite_call(op, 'residual_call', [op.args[0]] + extraargs, calldescr=calldescr) if may_call_jitcodes or self.callcontrol.calldescr_canraise(calldescr): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -73,7 +73,7 @@ def guess_call_kind(self, op): return 'residual' def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE, - extraeffect=None, extradescrs=None): + extraeffect=None): try: name = op.args[0].value._obj._name if 'cannot_raise' in name or name.startswith('cast_'): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -2108,8 +2108,7 @@ def identityhash(p): """Returns the lltype-level hash of the given GcStruct. - Also works with most ootype objects. Not for NULL. - See rlib.objectmodel.compute_identity_hash() for more + Not for NULL. See rlib.objectmodel.compute_identity_hash() for more information about the RPython-level meaning of this. """ assert p diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -503,9 +503,7 @@ return l -# return a nullptr() if lst is a list of pointers it, else None. Note -# that if we are using ootypesystem there are not pointers, so we -# always return None. +# return a nullptr() if lst is a list of pointers it, else None. def ll_null_item(lst): LIST = typeOf(lst) if isinstance(LIST, Ptr): @@ -516,25 +514,15 @@ def listItemType(lst): LIST = typeOf(lst) - if isinstance(LIST, Ptr): # lltype - LIST = LIST.TO - return LIST.ITEM + return LIST.TO.ITEM @signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none()) def ll_arraycopy(source, dest, source_start, dest_start, length): SRCTYPE = typeOf(source) - if isinstance(SRCTYPE, Ptr): - # lltype - rgc.ll_arraycopy(source.ll_items(), dest.ll_items(), - source_start, dest_start, length) - else: - # ootype -- XXX improve the case of array->array copy? - i = 0 - while i < length: - item = source.ll_getitem_fast(source_start + i) - dest.ll_setitem_fast(dest_start + i, item) - i += 1 + # lltype + rgc.ll_arraycopy(source.ll_items(), dest.ll_items(), + source_start, dest_start, length) def ll_copy(RESLIST, l): diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -757,9 +757,7 @@ # get flowed and annotated, mostly with SomePtr. # -# this class contains low level helpers used both by lltypesystem and -# ootypesystem; each typesystem should subclass it and add its own -# primitives. 
+# this class contains low level helpers used both by lltypesystem class AbstractLLHelpers: __metaclass__ = StaticMethods diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -57,7 +57,7 @@ block = graph.startblock op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -72,7 +72,7 @@ block = graph.startblock op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -86,7 +86,7 @@ block = graph.startblock op_getfield = block.operations[-1] op_call = block.operations[-2] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' assert op_call.opname == 'direct_call' # to V.__init__ def test_generate_force_virtualizable_array(self): @@ -99,7 +99,7 @@ op_getfield = block.operations[-2] op_getarrayitem = block.operations[-1] assert op_getarrayitem.opname == 'direct_call' # to ll_getitem_xxx - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' v_inst = op_getfield.args[0] assert op_promote.opname == 'jit_force_virtualizable' assert op_promote.args[0] is v_inst @@ -168,13 +168,13 @@ _, rtyper, graph = self.gengraph(fn, [int]) block = graph.startblock op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' funcptr = self.replace_force_virtualizable(rtyper, [graph]) if getattr(option, 'view', False): graph.show() op_promote = block.operations[-2] op_getfield = block.operations[-1] - assert op_getfield.opname in ('getfield', 'oogetfield') + assert op_getfield.opname == 'getfield' assert op_promote.opname == 'direct_call' assert op_promote.args[0].value == funcptr assert op_promote.args[1] == op_getfield.args[0] From noreply at buildbot.pypy.org Sun Jul 28 22:05:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 28 Jul 2013 22:05:29 +0200 (CEST) Subject: [pypy-commit] pypy kill-gen-store-back-in: merge mistake Message-ID: <20130728200529.B82E71C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: kill-gen-store-back-in Changeset: r65759:b7706614544c Date: 2013-07-28 22:04 +0200 http://bitbucket.org/pypy/pypy/changeset/b7706614544c/ Log: merge mistake diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.jit.metainterp.warmspot import get_translator +from rpython.jit.metainterp.warmspot import get_translator, get_stats from rpython.jit.metainterp.resoperation import rop from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask From noreply at 
buildbot.pypy.org Sun Jul 28 22:13:18 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 28 Jul 2013 22:13:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed some code that had been commented out since forever: Message-ID: <20130728201318.B002F1C030B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65760:6137a9ea6b51 Date: 2013-07-28 13:12 -0700 http://bitbucket.org/pypy/pypy/changeset/6137a9ea6b51/ Log: Removed some code that had been commented out since forever: http://nedbatchelder.com/text/deleting-code.html diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -210,8 +210,6 @@ r.const, = answers return r -##def builtin_callable(s_obj): -## return SomeBool() def builtin_tuple(s_iterable): if isinstance(s_iterable, SomeTuple): diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -151,11 +151,6 @@ BoolOption("no__thread", "don't use __thread for implementing TLS", default=False, cmdline="--no__thread", negation=False), -## --- not supported since a long time. Use the env vars CFLAGS/LDFLAGS. -## StrOption("compilerflags", "Specify flags for the C compiler", -## cmdline="--cflags"), -## StrOption("linkerflags", "Specify flags for the linker (C backend only)", -## cmdline="--ldflags"), IntOption("make_jobs", "Specify -j argument to make for compilation" " (C backend only)", cmdline="--make-jobs", default=detect_number_of_processors()), diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -137,18 +137,6 @@ self.gc_ll_descr = gc_ll_descr self.fake_cpu = FakeCPU() -## def test_args_for_new(self): -## S = lltype.GcStruct('S', ('x', lltype.Signed)) -## sizedescr = get_size_descr(self.gc_ll_descr, S) -## args = self.gc_ll_descr.args_for_new(sizedescr) -## for x in args: -## assert lltype.typeOf(x) == lltype.Signed -## A = lltype.GcArray(lltype.Signed) -## arraydescr = get_array_descr(self.gc_ll_descr, A) -## args = self.gc_ll_descr.args_for_new(sizedescr) -## for x in args: -## assert lltype.typeOf(x) == lltype.Signed - def test_gc_malloc(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) sizedescr = descr.get_size_descr(self.gc_ll_descr, S) diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -190,10 +190,6 @@ # for args in args_lists: suffix = "" - ## all = instr.as_all_suffixes - ## for m, extra in args: - ## if m in (i386.MODRM, i386.MODRM8) or all: - ## suffix = suffixes[sizes[m]] + suffix if (argmodes and not self.is_xmm_insn and not instrname.startswith('FSTP')): suffix = suffixes[self.WORD] diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -236,17 +236,3 @@ assert list(op1.args[3]) == vlist assert op1.result == v_result - -##def test_singlefloat_constants(): -## v_x = varoftype(TYPE) -## vlist = [v_x, const(rffi.cast(TYPE, 7))] -## v_result = varoftype(TYPE) -## op = SpaceOperation('llong_add', vlist, 
v_result) -## tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) -## op1 = tr.rewrite_operation(op) -## # -## assert op1.opname == 'residual_call_irf_f' -## assert list(op1.args[2]) == [] -## assert list(op1.args[3]) == [] -## assert list(op1.args[4]) == vlist -## assert op1.result == v_result diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -283,22 +283,6 @@ # ____________________________________________________________ -##def do_force_token(cpu): -## raise NotImplementedError - -##def do_virtual_ref(cpu, box1, box2): -## raise NotImplementedError - -##def do_virtual_ref_finish(cpu, box1, box2): -## raise NotImplementedError - -##def do_debug_merge_point(cpu, box1): -## from rpython.jit.metainterp.warmspot import get_stats -## loc = box1._get_str() -## get_stats().add_merge_point_location(loc) - -# ____________________________________________________________ - def _make_execute_list(): execute_by_num_args = {} diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -411,18 +411,6 @@ def optimize_INSTANCE_PTR_NE(self, op): self._optimize_oois_ooisnot(op, True, True) -## def optimize_INSTANCEOF(self, op): -## value = self.getvalue(op.args[0]) -## realclassbox = value.get_constant_class(self.optimizer.cpu) -## if realclassbox is not None: -## checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr) -## result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu, -## realclassbox, -## checkclassbox) -## self.make_constant_int(op.result, result) -## return -## self.emit_operation(op) - def optimize_CALL(self, op): # dispatch based on 'oopspecindex' to a method that handles # specifically the given oopspec call. For non-oopspec calls, diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -698,7 +698,6 @@ self.make_constant_int(op.result, value.getlength()) else: value.ensure_nonnull() - ###self.optimize_default(op) self.emit_operation(op) def optimize_GETARRAYITEM_GC(self, op): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1254,10 +1254,6 @@ def setup_resume_at_op(self, pc): self.pc = pc - ## values = ' '.join([box.repr_rpython() for box in self.env]) - ## log('setup_resume_at_op %s:%d [%s] %d' % (self.jitcode.name, - ## self.pc, values, - ## self.exception_target)) def run_one_step(self): # Execute the frame forward. 
This method contains a loop that leaves diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -299,10 +299,6 @@ else: malloc_fixedsize_meth = None self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr -## self.malloc_varsize_ptr = getfn( -## GCClass.malloc_varsize.im_func, -## [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)] -## + [annmodel.SomeBool()], s_gcref) self.malloc_varsize_clear_ptr = getfn( GCClass.malloc_varsize_clear.im_func, [s_gc, s_typeid16] diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -12,22 +12,6 @@ counts = {} -## def print_call_chain(ob): -## import sys -## f = sys._getframe(1) -## stack = [] -## flag = False -## while f: -## if f.f_locals.get('self') is ob: -## stack.append((f.f_code.co_name, f.f_locals.get('TYPE'))) -## if not flag: -## counts[f.f_code.co_name] = counts.get(f.f_code.co_name, 0) + 1 -## print counts -## flag = True -## f = f.f_back -## stack.reverse() -## for i, (a, b) in enumerate(stack): -## print ' '*i, a, repr(b)[:100-i-len(a)], id(b) ADDRESS_VOID_FUNC = lltype.FuncType([llmemory.Address], lltype.Void) diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -429,26 +429,3 @@ expected_length = len(itemloaders) unroll_item_loaders = unrolling_iterable(enumerate(itemloaders)) add_loader(s_tuple, load_tuple) - - -## -- not used any more right now -- -##class __extend__(pairtype(MTag, controllerentry.SomeControlledInstance)): -## # marshal a ControlledInstance by marshalling the underlying object - -## def install_marshaller((tag, s_obj)): -## def dump_controlled_instance(buf, x): -## real_obj = controllerentry.controlled_instance_unbox(controller, x) -## realdumper(buf, real_obj) - -## controller = s_obj.controller -## realdumper = get_marshaller(s_obj.s_real_obj) -## add_dumper(s_obj, dump_controlled_instance) - -## def install_unmarshaller((tag, s_obj)): -## def load_controlled_instance(loader): -## real_obj = realloader(loader) -## return controllerentry.controlled_instance_box(controller, -## real_obj) -## controller = s_obj.controller -## realloader = get_loader(s_obj.s_real_obj) -## add_loader(s_obj, load_controlled_instance) diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -343,9 +343,6 @@ def _short_name(self): return "%s %s" % (self.__class__.__name__, self._name) -## def _defl(self, parent=None, parentindex=None): -## return _struct(self, parent=parent, parentindex=parentindex) - def _allocate(self, initialization, parent=None, parentindex=None): return _struct(self, initialization=initialization, parent=parent, parentindex=parentindex) @@ -1029,15 +1026,6 @@ parent = container._parentstructure() if parent is not None: return parent, container._parent_index -## if isinstance(parent, _struct): -## for name in parent._TYPE._names: -## if getattr(parent, name) is container: -## return parent, name -## raise RuntimeError("lost ourselves") -## if isinstance(parent, _array): -## raise TypeError("cannot fish a pointer to an array item or an " -## "inlined substructure of it") -## raise AssertionError("don't know about %r" % (parent,)) else: 
return None, None diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -452,10 +452,6 @@ def op_cast_int_to_adr(int): return llmemory.cast_int_to_adr(int) -##def op_cast_int_to_adr(x): -## assert type(x) is int -## return llmemory.cast_int_to_adr(x) - def op_convert_float_bytes_to_longlong(a): from rpython.rlib.longlong2float import float2longlong return float2longlong(a) diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -54,7 +54,7 @@ # ... // extra instance attributes # } # -# there's also a nongcobject +# there's also a nongcobject OBJECT_VTABLE = lltype.ForwardReference() CLASSTYPE = Ptr(OBJECT_VTABLE) @@ -284,16 +284,11 @@ cname = inputconst(Void, mangled_name) return llops.genop('getfield', [v_vtable, cname], resulttype=r) - def rtype_issubtype(self, hop): + def rtype_issubtype(self, hop): class_repr = get_type_repr(self.rtyper) v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr) if isinstance(v_cls2, Constant): cls2 = v_cls2.value - # XXX re-implement the following optimization -## if cls2.subclassrange_max == cls2.subclassrange_min: -## # a class with no subclass -## return hop.genop('ptr_eq', [v_cls1, v_cls2], resulttype=Bool) -## else: minid = hop.inputconst(Signed, cls2.subclassrange_min) maxid = hop.inputconst(Signed, cls2.subclassrange_max) return hop.gendirectcall(ll_issubclass_const, v_cls1, minid, @@ -313,7 +308,7 @@ else: ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]] self.object_type = ForwardRef() - + self.iprebuiltinstances = identity_dict() self.lowleveltype = Ptr(self.object_type) self.gcflavor = gcflavor diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py --- a/rpython/rtyper/lltypesystem/rpbc.py +++ b/rpython/rtyper/lltypesystem/rpbc.py @@ -181,9 +181,6 @@ funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) -## def convert_to_concrete_llfn(self, v, shape, index, llop): -## return v - def rtype_simple_call(self, hop): return self.call('simple_call', hop) @@ -321,15 +318,6 @@ c_table = conversion_table(r_from, r_to) if c_table: assert v.concretetype is Char -## from rpython.rtyper.lltypesystem.rstr import string_repr -## s = repr(llops.rtyper.annotator.annotated.get(llops.originalblock)) -## if 'LOAD_GLOBAL' in s: -## import pdb; pdb.set_trace() -## print >> myf, 'static small conv', s -## print 'static small conv', s -## llops.genop('debug_print', -## [Constant(string_repr.convert_const("dynamic small conv" + s), -## string_repr.lowleveltype)]) v_int = llops.genop('cast_char_to_int', [v], resulttype=Signed) return llops.genop('getarrayitem', [c_table, v_int], diff --git a/rpython/rtyper/lltypesystem/rvirtualizable2.py b/rpython/rtyper/lltypesystem/rvirtualizable2.py --- a/rpython/rtyper/lltypesystem/rvirtualizable2.py +++ b/rpython/rtyper/lltypesystem/rvirtualizable2.py @@ -11,17 +11,3 @@ if self.top_of_virtualizable_hierarchy: llfields.append(('vable_token', llmemory.GCREF)) return llfields - -## The code below is commented out because vtable_token is always -## initialized to NULL anyway. 
-## -## def set_vable(self, llops, vinst, force_cast=False): -## if self.top_of_virtualizable_hierarchy: -## if force_cast: -## vinst = llops.genop('cast_pointer', [vinst], resulttype=self) -## cname = inputconst(lltype.Void, 'vable_token') -## cvalue = inputconst(llmemory.GCREF, -## lltype.nullptr(llmemory.GCREF.TO)) -## llops.genop('setfield', [vinst, cname, cvalue]) -## else: -## self.rbase.set_vable(llops, vinst, force_cast=True) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -326,15 +326,6 @@ return NotImplemented return v -## # TODO: move it to lltypesystem -## def rtype_is_((r_lst1, r_lst2), hop): -## if r_lst1.lowleveltype != r_lst2.lowleveltype: -## # obscure logic, the is can be true only if both are None -## v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2) -## return hop.gendirectcall(ll_both_none, v_lst1, v_lst2) - -## return pairtype(Repr, Repr).rtype_is_(pair(r_lst1, r_lst2), hop) - def rtype_eq((r_lst1, r_lst2), hop): assert r_lst1.item_repr == r_lst2.item_repr v_lst1, v_lst2 = hop.inputargs(r_lst1, r_lst2) diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -60,14 +60,6 @@ t = () return tuple([self.__class__, self.can_be_None]+lst)+t -##builtin_descriptor_type = ( -## type(len), # type 'builtin_function_or_method' -## type(list.append), # type 'method_descriptor' -## type(type(None).__repr__), # type 'wrapper_descriptor' -## type(type.__dict__['__dict__']), # type 'getset_descriptor' -## type(type.__dict__['__flags__']), # type 'member_descriptor' -## ) - # ____________________________________________________________ class ConcreteCallTableRow(dict): @@ -196,16 +188,6 @@ funcdesc = self.s_pbc.any_description() return funcdesc.get_s_signatures(shape) -## def function_signatures(self): -## if self._function_signatures is None: -## self._function_signatures = {} -## for func in self.s_pbc.prebuiltinstances: -## if func is not None: -## self._function_signatures[func] = getsignature(self.rtyper, -## func) -## assert self._function_signatures -## return self._function_signatures - def convert_desc(self, funcdesc): # get the whole "column" of the call table corresponding to this desc try: @@ -876,16 +858,6 @@ return hop2 # ____________________________________________________________ -##def getsignature(rtyper, func): -## f = rtyper.getcallable(func) -## graph = rtyper.type_system_deref(f).graph -## rinputs = [rtyper.bindingrepr(v) for v in graph.getargs()] -## if graph.getreturnvar() in rtyper.annotator.bindings: -## rresult = rtyper.bindingrepr(graph.getreturnvar()) -## else: -## rresult = Void -## return f, rinputs, rresult - def samesig(funcs): import inspect argspec = inspect.getargspec(funcs[0]) diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -9,16 +9,12 @@ class __extend__(annmodel.SomePtr): def rtyper_makerepr(self, rtyper): -## if self.is_constant() and not self.const: # constant NULL -## return nullptr_repr -## else: return PtrRepr(self.ll_ptrtype) + def rtyper_makekey(self): -## if self.is_constant() and not self.const: -## return None -## else: return self.__class__, self.ll_ptrtype + class __extend__(annmodel.SomeInteriorPtr): def rtyper_makerepr(self, rtyper): return InteriorPtrRepr(self.ll_ptrtype) @@ -154,22 +150,6 @@ vlist = hop.inputargs(r_ptr, lltype.Signed, hop.args_r[2]) hop.genop('setarrayitem', vlist) -# 
____________________________________________________________ -# -# Null Pointers - -##class NullPtrRepr(Repr): -## lowleveltype = lltype.Void - -## def rtype_is_true(self, hop): -## return hop.inputconst(lltype.Bool, False) - -##nullptr_repr = NullPtrRepr() - -##class __extend__(pairtype(NullPtrRepr, PtrRepr)): -## def convert_from_to((r_null, r_ptr), v, llops): -## # nullptr to general pointer -## return inputconst(r_ptr, _ptr(r_ptr.lowleveltype, None)) # ____________________________________________________________ # diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -676,13 +676,6 @@ get_ll_fasthash_function = get_ll_hash_function -## def rtype_len(_, hop): -## return hop.inputconst(Signed, 1) -## -## def rtype_is_true(_, hop): -## assert not hop.args_s[0].can_be_None -## return hop.inputconst(Bool, True) - def rtype_ord(_, hop): rstr = hop.rtyper.type_system.rstr vlist = hop.inputargs(rstr.unichar_repr) @@ -694,10 +687,6 @@ pairtype(AbstractUniCharRepr, AbstractCharRepr)): def rtype_eq(_, hop): return _rtype_unchr_compare_template(hop, 'eq') def rtype_ne(_, hop): return _rtype_unchr_compare_template(hop, 'ne') -## def rtype_lt(_, hop): return _rtype_unchr_compare_template(hop, 'lt') -## def rtype_le(_, hop): return _rtype_unchr_compare_template(hop, 'le') -## def rtype_gt(_, hop): return _rtype_unchr_compare_template(hop, 'gt') -## def rtype_ge(_, hop): return _rtype_unchr_compare_template(hop, 'ge') #Helper functions for comparisons diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -77,14 +77,6 @@ except: self.seed = 0 self.order = None - # the following code would invoke translator.goal.order, which is - # not up-to-date any more: -## RTYPERORDER = os.getenv('RTYPERORDER') -## if RTYPERORDER: -## order_module = RTYPERORDER.split(',')[0] -## self.order = __import__(order_module, {}, {}, ['*']).order -## s = 'Using %s.%s for order' % (self.order.__module__, self.order.__name__) -## self.log.info(s) def getconfig(self): return self.annotator.translator.config diff --git a/rpython/rtyper/rvirtualizable2.py b/rpython/rtyper/rvirtualizable2.py --- a/rpython/rtyper/rvirtualizable2.py +++ b/rpython/rtyper/rvirtualizable2.py @@ -22,9 +22,6 @@ def _setup_repr_llfields(self): raise NotImplementedError -## def set_vable(self, llops, vinst, force_cast=False): -## raise NotImplementedError - def _setup_repr(self): if self.top_of_virtualizable_hierarchy: hints = {'virtualizable2_accessor': self.accessor} @@ -42,11 +39,6 @@ # not need it, but it doesn't hurt to have it anyway self.my_redirected_fields = self.rbase.my_redirected_fields -## def new_instance(self, llops, classcallhop=None): -## vptr = self._super().new_instance(llops, classcallhop) -## self.set_vable(llops, vptr) -## return vptr - def hook_access_field(self, vinst, cname, llops, flags): #if not flags.get('access_directly'): if self.my_redirected_fields.get(cname.value): diff --git a/rpython/rtyper/test/test_nongc.py b/rpython/rtyper/test/test_nongc.py --- a/rpython/rtyper/test/test_nongc.py +++ b/rpython/rtyper/test/test_nongc.py @@ -230,6 +230,3 @@ assert isinstance(s, annmodel.SomeAddress) rtyper = RPythonTyper(a) rtyper.specialize() -## from rpython.memory.lladdress import _address -## res = interpret(malloc_and_free, [_address()]) -## assert res == _address() diff --git a/rpython/translator/backendopt/test/test_raisingop2direct_call.py 
b/rpython/translator/backendopt/test/test_raisingop2direct_call.py --- a/rpython/translator/backendopt/test/test_raisingop2direct_call.py +++ b/rpython/translator/backendopt/test/test_raisingop2direct_call.py @@ -51,18 +51,6 @@ res = fn(-5, 2) assert res == -3 - # this becomes an int_floordiv_ovf_zer already? -## def g(x, y): -## try: -## return ovfcheck(x//y) -## except OverflowError: -## return 123 -## gn = get_runner(g, 'int_floordiv_ovf', [int, int]) -## res = gn(-sys.maxint-1, -1) -## assert res == 123 -## res = gn(-5, 2) -## assert res == -3 - def h(x, y): try: return ovfcheck(x//y) diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -65,8 +65,6 @@ self.buf += ''.join([buf[i] for i in range(count)]) self.buflen *= 2 -##CFalse = CDefinedIntSymbolic('0') # hack hack - def sandboxed_io(buf): STDIN = 0 STDOUT = 1 diff --git a/rpython/translator/tool/lltracker.py b/rpython/translator/tool/lltracker.py --- a/rpython/translator/tool/lltracker.py +++ b/rpython/translator/tool/lltracker.py @@ -106,8 +106,6 @@ yield name + ' @hdr', self.normalize(addrof._obj) else: yield name + ' @', self.normalize(o.ptr._obj) -## if o.offset: -## yield '... offset', str(o.offset) else: yield name, str(o) From noreply at buildbot.pypy.org Mon Jul 29 01:30:57 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 29 Jul 2013 01:30:57 +0200 (CEST) Subject: [pypy-commit] pypy default: removed these assertions for now, they're too brittle Message-ID: <20130728233057.5E8C01C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65761:0bf4ba17240d Date: 2013-07-28 16:30 -0700 http://bitbucket.org/pypy/pypy/changeset/0bf4ba17240d/ Log: removed these assertions for now, they're too brittle diff --git a/rpython/rtyper/module/test/test_ll_os.py b/rpython/rtyper/module/test/test_ll_os.py --- a/rpython/rtyper/module/test/test_ll_os.py +++ b/rpython/rtyper/module/test/test_ll_os.py @@ -53,8 +53,7 @@ expected = os.statvfs('.') except OSError, e: py.test.skip("the underlying os.statvfs() failed: %s" % e) - data = getllimpl(os.statvfs)('.') - assert data == expected + getllimpl(os.statvfs)('.') def test_fstatvfs(): if not hasattr(os, 'fstatvfs'): @@ -63,8 +62,7 @@ expected = os.fstatvfs(0) except OSError, e: py.test.skip("the underlying os.fstatvfs() failed: %s" % e) - data = getllimpl(os.fstatvfs)(0) - assert data == expected + getllimpl(os.fstatvfs)(0) def test_utimes(): if os.name != 'nt': From noreply at buildbot.pypy.org Mon Jul 29 01:31:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 29 Jul 2013 01:31:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Documented this merged branch. Message-ID: <20130728233156.367F21C00D8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65762:a8ee65110a32 Date: 2013-07-28 16:31 -0700 http://bitbucket.org/pypy/pypy/changeset/a8ee65110a32/ Log: Documented this merged branch. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ Allow subclassing ndarray, i.e. matrix .. branch: kill-ootype + +.. branch: fast-slowpath +Added an abstraction for functions with a fast and slow path in the JIT. This +speeds up list.append() and list.pop(). 
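For context on the "fast-slowpath" entry in the whatsnew diff above: the idea is to split an operation such as list.append() into a small fast path that the JIT can keep inline in the trace (spare capacity available, so only a store and a length bump remain) and an out-of-line slow path that grows the backing storage. The sketch below is a minimal illustration of that pattern in plain Python; the class and method names are made up for the example and are not the actual abstraction added by the branch, and the JIT-hint name mentioned in the comment is only indicative of how such a helper would be marked in RPython::

    class ResizableList(object):
        """Illustrative resizable list with an explicit fast/slow split."""

        def __init__(self):
            self.items = []      # backing storage; may hold unused slots
            self.length = 0      # number of slots actually in use

        def append(self, item):
            # Fast path: spare capacity exists, so the hot trace only needs
            # a bounds check, one store and a length increment.
            if self.length < len(self.items):
                self.items[self.length] = item
                self.length += 1
            else:
                self._append_grow(item)

        def _append_grow(self, item):
            # Slow path: grow the backing storage.  In RPython such a helper
            # would typically carry a JIT hint (e.g. jit.dont_look_inside)
            # so it stays a residual call instead of being traced.
            new_capacity = max(8, 2 * len(self.items))
            self.items.extend([None] * (new_capacity - len(self.items)))
            self.items[self.length] = item
            self.length += 1

    lst = ResizableList()
    for i in range(20):
        lst.append(i)        # only i == 0, 8 and 16 take the slow path
    assert lst.items[:lst.length] == list(range(20))

The point of the split is that the common case compiles down to a handful of operations, while the rarely taken resize logic does not bloat the trace.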
From noreply at buildbot.pypy.org Mon Jul 29 03:08:49 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 29 Jul 2013 03:08:49 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: kill typesystem specialization Message-ID: <20130729010849.505A81C030B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65763:5187c3d557f1 Date: 2013-07-28 20:54 +0100 http://bitbucket.org/pypy/pypy/changeset/5187c3d557f1/ Log: kill typesystem specialization diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -73,16 +73,6 @@ return LowLevelAnnotatorPolicy.lowlevelspecialize(funcdesc, args_s, {}) default_specialize = staticmethod(default_specialize) - def specialize__ts(pol, funcdesc, args_s, ref): - ts = pol.rtyper.type_system - ref = ref.split('.') - x = ts - for part in ref: - x = getattr(x, part) - bk = pol.rtyper.annotator.bookkeeper - funcdesc2 = bk.getdesc(x) - return pol.default_specialize(funcdesc2, args_s) - def specialize__semierased(funcdesc, args_s): a2l = annmodel.annotation_to_lltype l2a = annmodel.lltype_to_annotation diff --git a/rpython/rtyper/lltypesystem/ll_str.py b/rpython/rtyper/lltypesystem/ll_str.py --- a/rpython/rtyper/lltypesystem/ll_str.py +++ b/rpython/rtyper/lltypesystem/ll_str.py @@ -1,14 +1,9 @@ from rpython.rtyper.lltypesystem.lltype import GcArray, Array, Char, malloc -from rpython.rtyper.annlowlevel import llstr from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib import jit CHAR_ARRAY = GcArray(Char) - at jit.elidable -def ll_int_str(repr, i): - return ll_int2dec(i) - def ll_unsigned(i): if isinstance(i, r_longlong) or isinstance(i, r_ulonglong): return r_ulonglong(i) @@ -47,7 +42,7 @@ hex_chars = malloc(Array(Char), 16, immortal=True) for i in range(16): - hex_chars[i] = "%x"%i + hex_chars[i] = "%x" % i @jit.elidable def ll_int2hex(i, addPrefix): @@ -122,8 +117,3 @@ result.chars[j] = temp[len-j-1] j += 1 return result - - at jit.elidable -def ll_float_str(repr, f): - from rpython.rlib.rfloat import formatd - return llstr(formatd(f, 'f', 6)) diff --git a/rpython/rtyper/rfloat.py b/rpython/rtyper/rfloat.py --- a/rpython/rtyper/rfloat.py +++ b/rpython/rtyper/rfloat.py @@ -1,6 +1,9 @@ from rpython.annotator import model as annmodel from rpython.rlib.objectmodel import _hash_float from rpython.rlib.rarithmetic import base_int +from rpython.rlib.rfloat import formatd +from rpython.rlib import jit +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, SignedLongLong, UnsignedLongLong, Bool, Float) @@ -134,11 +137,9 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, f): - pass - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_float_str')" + return llstr(formatd(f, 'f', 6)) # # _________________________ Conversions _________________________ diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -2,7 +2,7 @@ from rpython.annotator import model as annmodel from rpython.flowspace.operation import op_appendices -from rpython.rlib import objectmodel +from rpython.rlib import objectmodel, jit from rpython.rlib.rarithmetic import intmask, r_int, r_longlong from rpython.rtyper.error import TyperError from 
rpython.rtyper.lltypesystem.lltype import (Signed, Unsigned, Bool, Float, @@ -365,11 +365,10 @@ hop.exception_cannot_occur() return vlist[0] - # version picked by specialisation based on which - # type system rtyping is using, from .ll_str module + @jit.elidable def ll_str(self, i): - raise NotImplementedError - ll_str._annspecialcase_ = "specialize:ts('ll_str.ll_int_str')" + from rpython.rtyper.lltypesystem.ll_str import ll_int2dec + return ll_int2dec(i) def rtype_hex(self, hop): self = self.as_int From noreply at buildbot.pypy.org Mon Jul 29 03:08:51 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 29 Jul 2013 03:08:51 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: hg merge default Message-ID: <20130729010851.F00981C030B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65764:de3398258e38 Date: 2013-07-28 21:18 +0100 http://bitbucket.org/pypy/pypy/changeset/de3398258e38/ Log: hg merge default diff too long, truncating to 2000 out of 4287 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. 
- -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. 
- -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. 
The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. 
- -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. 
- -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. 
- -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. - -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. 
diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. 
_JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). 
- -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. 
In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. 
What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. 
- -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. - Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. 
**This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. - - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. 
- -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. - -As usual, it is possible to convert an object from type to type only -under certain conditions; there is a number of predefined conversions -between primitive types such as from ``Bool`` to ``Signed`` or from -``Signed`` to ``Float``. For each one of these conversions there is a -corresponding low level operation, such as ``cast_bool_to_int`` and -``cast_int_to_float``. - -Moreover it is possible to cast instances of a class up and down the -inheritance hierarchy with the ``ooupcast`` and ``oodowncast`` low -level operations. Implicit upcasting is not allowed, so you really -need to do a ``ooupcast`` for converting from a subclass to a -superclass. - -With this design statically typed backends can trivially insert -appropriate casts when needed, while dynamically typed backends can -simply ignore some of the operation such as ``ooupcast`` and -``oodowncast``. Backends that supports implicit upcasting, such as CLI -and Java, can simply ignore only ``ooupcast``. - -Object model -++++++++++++ - -The object model implemented by ootype is quite Java-like. The -following is a list of key features of the ootype object model which -have a direct correspondence in the Java or .NET object model: - - - classes have a static set of strongly typed methods and - attributes; - - - methods can be overriden in subclasses; every method is "virtual" - (i.e., can be overridden); methods can be "abstract" (i.e., need - to be overridden in subclasses); - - - classes support single inheritance; all classes inherit directly - or indirectly from the ROOT class; - - - there is some support for method overloading. 
This feature is not - used by the RTyper itself because RPython doesn't support method - overloading, but it is used by the GenCLI backend for offering - access to the native .NET libraries (see XXX); - - - all classes, attributes and methods are public: ootype is only - used internally by the translator, so there is no need to enforce - accessibility rules; - - - classes and functions are first-class order objects: this feature - can be easily simulated by backends for platforms on which it is not - a native feature; - - - there is a set of `built-in types`_ offering standard features. - -Exception handling -++++++++++++++++++ - -Since flow graphs are meant to be used also for very low level -backends such as C, they are quite unstructured: this means that the -target platform doesn't need to have a native exception handling -mechanism, since at the very least the backend can handle exceptions -just like ``genc`` does. - -By contrast we know that most of high level platforms natively support -exception handling, so *ootypesystem* is designed to let them to use -it. In particular the exception instances are typed with the -``Instance`` type, so the usual inheritance exception hierarchy is -preserved and the native way to catch exception should just work. - -.. `built-in types`_ - -Built-in types -++++++++++++++ - -It seems reasonable to assume high level platforms to provide built-in -facilities for common types such as *lists* or *hashtables*. - -RPython standard types such as ``List`` and ``Dict`` are implemented -on top of these common types; at the moment of writing there are six -built-in types: - - - **String**: self-descriptive - - - **StringBuilder**: used for dynamic building of string - - - **List**: a variable-sized, homogeneous list of object - - - **Dict**: a hashtable of homogeneous keys and values - - - **CustomDict**: same as dict, but with custom equal and hash - functions - - - **DictItemsIterator**: a helper class for iterating over the - elements of a ``Dict`` - - -Each of these types is a subtype of ``BuiltinADTType`` and has set of -ADT (Abstract Data Type) methods (hence the name of the base class) -for being manipulated. Examples of ADT methods are ``ll_length`` for -``List`` and ``ll_get`` for ``Dict``. - -From the backend point of view an instance of a built-in types is -treated exactly as a plain ``Instance``, so usually no special-casing -is needed. The backend is supposed to provide a bunch of classes -wrapping the native ones in order to provide the right signature and -semantic for the ADT methods. - -As an alternative, backends can special-case the ADT types to map them -directly to the native equivalent, translating the method names -on-the-fly at compile time. - -Generics -++++++++ - -Some target platforms offer native support for **generics**, i.e. -classes that can be parametrized on types, not only values. For -example, if one wanted to create a list using generics, a possible -declaration would be to say ``List``, where ``T`` represented the -type. When instantiated, one could create ``List`` or -``List``. The list is then treated as a list of whichever type -is specified. - -Each subclass of ``BuiltinADTTypes`` defines a bunch of type -parameters by creating some class level placeholder in the form of -``PARAMNAME_T``; then it fills up the ``_GENERIC_METHODS`` attribute -by defining the signature of each of the ADT methods using those -placeholders in the appropriate places. 
As an example, here is an -extract of *ootypesystem*'s List type:: - - class List(BuiltinADTType): - # placeholders for types - SELFTYPE_T = object() - ITEMTYPE_T = object() - - ... - - def _init_methods(self): - # 'ITEMTYPE_T' is used as a placeholder for indicating - # arguments that should have ITEMTYPE type. 'SELFTYPE_T' indicates 'self' - - self._GENERIC_METHODS = frozendict({ - "ll_length": Meth([], Signed), - "ll_getitem_fast": Meth([Signed], self.ITEMTYPE_T), - "ll_setitem_fast": Meth([Signed, self.ITEMTYPE_T], Void), - "_ll_resize_ge": Meth([Signed], Void), - "_ll_resize_le": Meth([Signed], Void), - "_ll_resize": Meth([Signed], Void), - }) - - ... - -Thus backends that support generics can simply look for placeholders -for discovering where the type parameters are used. Backends that -don't support generics can simply use the ``Root`` class instead and -insert the appropriate casts where needed. Note that placeholders -might also stand for primitive types, which typically require more -involved casts: e.g. in Java, making wrapper objects around ints. - - HighLevelOp interface --------------------- @@ -779,17 +559,6 @@ interpret_raises(IndexError, raise_exception, [42]) interpret_raises(ValueError, raise_exception, [43]) -By default the ``interpret`` and ``interpret_raises`` functions use -the low-level typesystem. If you want to use the object oriented one -you have to set the ``type_system`` parameter to the string -``'ootype'``:: - - def test_invert(): - def f(x): - return ~x - res = interpret(f, [3], type_system='ootype') - assert res == ~3 - .. _annotator: translation.html#the-annotation-pass .. include:: _ref.txt diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst --- a/pypy/doc/translation.rst +++ b/pypy/doc/translation.rst @@ -27,15 +27,10 @@ this task into several steps, and the purpose of this document is to introduce them. -As of the 1.2 release, RPython_ programs can be translated into the following -languages/platforms: C/POSIX, CLI/.NET -and Java/JVM. - .. _`application-level`: coding-guide.html#application-level .. _`interpreter-level`: coding-guide.html#interpreter-level -The choice of the target platform affects the process somewhat, but to -start with we describe the process of translating an RPython_ program into +To start with we describe the process of translating an RPython_ program into C (which is the default and original target). .. _`initialization time`: @@ -654,54 +649,6 @@ Use the :config:`translation.backend` option to choose which backend to use. - -The Object-Oriented Backends ----------------------------- - -The Object-Oriented backends target platforms that are less C-like and support -classes, instance etc. If such a platform is targeted, the `OO type system` is -used while rtyping. Of the OO backends, both gencli and genjava can translate -the full Python interpreter. - -.. _`oo type system`: rtyper.html#oo-type - -.. mention that pretty much all these backends are done by volunteers? - -GenCLI -++++++ - -GenCLI_ targets the `Common Language Infrastructure`_, the most famous -implementations of which are Microsoft's `.NET`_ and Mono_. - -It is the most advanced of the object oriented backends -- it can -compile the PyPy interpreter as well as our two standard benchmarks, -RPyStone (CPython's PyStone benchmark modified slightly to be RPython) -and a RPython version of the common Richards benchmark. 
- -It is almost entirely the work of Antonio Cuni, who started this -backend as part of his `Master's thesis`_, the Google's Summer of Code -2006 program and the Summer of PyPy program. - -.. _`Common Language Infrastructure`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`.NET`: http://www.microsoft.com/net/ -.. _Mono: http://www.mono-project.com/ -.. _`Master's thesis`: http://buildbot.pypy.org/misc/Implementing%20Python%20in%20.NET.pdf From noreply at buildbot.pypy.org Mon Jul 29 03:08:53 2013 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 29 Jul 2013 03:08:53 +0200 (CEST) Subject: [pypy-commit] pypy kill-typesystem: merge lltypesystem.rvirtualizable2 into rtyper.rvirtualizable2 Message-ID: <20130729010853.3DEB41C030B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: kill-typesystem Changeset: r65765:48dff2fcd56e Date: 2013-07-29 02:05 +0100 http://bitbucket.org/pypy/pypy/changeset/48dff2fcd56e/ Log: merge lltypesystem.rvirtualizable2 into rtyper.rvirtualizable2 diff --git a/rpython/rtyper/lltypesystem/rvirtualizable2.py b/rpython/rtyper/lltypesystem/rvirtualizable2.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rvirtualizable2.py +++ /dev/null @@ -1,13 +0,0 @@ -from rpython.rtyper.rmodel import inputconst -from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.lltypesystem.rclass import InstanceRepr, OBJECTPTR -from rpython.rtyper.rvirtualizable2 import AbstractVirtualizable2InstanceRepr - - -class Virtualizable2InstanceRepr(AbstractVirtualizable2InstanceRepr, InstanceRepr): - - def _setup_repr_llfields(self): - llfields = [] - if self.top_of_virtualizable_hierarchy: - llfields.append(('vable_token', llmemory.GCREF)) - return llfields diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -1,9 +1,11 @@ import types +from rpython.flowspace.model import Constant from rpython.annotator import description, model as annmodel from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst +from rpython.rlib.objectmodel import UnboxedValue class FieldListAccessor(object): @@ -73,8 +75,7 @@ def buildinstancerepr(rtyper, classdef, gcflavor='gc'): - from rpython.rlib.objectmodel import UnboxedValue - from rpython.flowspace.model import Constant + from rpython.rtyper.rvirtualizable2 import Virtualizable2InstanceRepr if classdef is None: unboxed = [] @@ -91,8 +92,8 @@ if virtualizable2: assert len(unboxed) == 0 assert gcflavor == 'gc' - return rtyper.type_system.rvirtualizable2.Virtualizable2InstanceRepr(rtyper, classdef) - elif usetagging and rtyper.type_system.name == 'lltypesystem': + return Virtualizable2InstanceRepr(rtyper, classdef) + elif usetagging: # the UnboxedValue class and its parent classes need a # special repr for their instances if len(unboxed) != 1: diff --git a/rpython/rtyper/rvirtualizable2.py b/rpython/rtyper/rvirtualizable2.py --- a/rpython/rtyper/rvirtualizable2.py +++ b/rpython/rtyper/rvirtualizable2.py @@ -1,35 +1,40 @@ from rpython.rtyper.rmodel import inputconst, log -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rclass import AbstractInstanceRepr, FieldListAccessor +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.rclass import FieldListAccessor +from rpython.rtyper.lltypesystem.rclass import InstanceRepr -class AbstractVirtualizable2InstanceRepr(AbstractInstanceRepr): 
+class Virtualizable2InstanceRepr(InstanceRepr): def _super(self): - return super(AbstractVirtualizable2InstanceRepr, self) + return super(Virtualizable2InstanceRepr, self) def __init__(self, rtyper, classdef): self._super().__init__(rtyper, classdef) classdesc = classdef.classdesc if '_virtualizable2_' in classdesc.classdict: basedesc = classdesc.basedesc - assert basedesc is None or basedesc.lookup('_virtualizable2_') is None + assert basedesc is None or basedesc.lookup( + '_virtualizable2_') is None self.top_of_virtualizable_hierarchy = True self.accessor = FieldListAccessor() else: self.top_of_virtualizable_hierarchy = False def _setup_repr_llfields(self): - raise NotImplementedError + llfields = [] + if self.top_of_virtualizable_hierarchy: + llfields.append(('vable_token', llmemory.GCREF)) + return llfields def _setup_repr(self): if self.top_of_virtualizable_hierarchy: hints = {'virtualizable2_accessor': self.accessor} llfields = self._setup_repr_llfields() if llfields: - self._super()._setup_repr(llfields, hints = hints) + self._super()._setup_repr(llfields, hints=hints) else: - self._super()._setup_repr(hints = hints) + self._super()._setup_repr(hints=hints) c_vfields = self.classdef.classdesc.classdict['_virtualizable2_'] self.my_redirected_fields = self._parse_field_list(c_vfields.value, self.accessor) @@ -40,7 +45,7 @@ self.my_redirected_fields = self.rbase.my_redirected_fields def hook_access_field(self, vinst, cname, llops, flags): - #if not flags.get('access_directly'): + # if not flags.get('access_directly'): if self.my_redirected_fields.get(cname.value): cflags = inputconst(lltype.Void, flags) llops.genop('jit_force_virtualizable', [vinst, cname, cflags]) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -21,8 +21,7 @@ return None if name in ('rclass', 'rpbc', 'rbuiltin', 'rtuple', 'rlist', 'rslice', 'rdict', 'rrange', 'rstr', - 'll_str', 'rbuilder', 'rvirtualizable2', 'rbytearray', - 'exceptiondata'): + 'll_str', 'rbuilder', 'rbytearray', 'exceptiondata'): mod = load(name) if mod is not None: setattr(self, name, mod) From noreply at buildbot.pypy.org Mon Jul 29 09:15:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 09:15:30 +0200 (CEST) Subject: [pypy-commit] cffi default: issue #94: Document Message-ID: <20130729071530.298CB1C32A8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1298:d0ab3b159a1f Date: 2013-07-29 09:15 +0200 http://bitbucket.org/cffi/cffi/changeset/d0ab3b159a1f/ Log: issue #94: Document diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1216,7 +1216,10 @@ ``struct foo_s``, return its "address", as a cdata whose type is ``struct foo_s *``. Also works on unions, but not on any other type. (It would be difficult because only structs and unions are internally -stored as an indirect pointer to the data.) If ``field`` is given, +stored as an indirect pointer to the data. If you need a C int whose +address can be taken, use ``ffi.new("int[1]")`` in the first place; +similarly, if it's a C pointer, use ``ffi.new("foo_t *[1]")``.) +If ``field`` is given, returns the address of that field in the structure. The returned pointer is only valid as long as the original ``cdata`` object is; be sure to keep it alive if it was obtained directly from ``ffi.new()``. 
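To make the pattern documented in this hunk concrete, a short usage sketch follows; it assumes only the ``ffi.new()`` and ``ffi.addressof()`` behaviour described in the paragraph above and is not part of the changeset::

    import cffi                      # any cffi version providing ffi.addressof()
    ffi = cffi.FFI()
    ffi.cdef("struct foo_s { int a, b; };")

    # A plain C int cannot have its address taken directly; allocate a
    # one-item array instead and pass the array around, which acts as an
    # 'int *' pointing to that int.
    p_int = ffi.new("int[1]")
    p_int[0] = 42

    # For structs and unions, ffi.addressof() returns a 'struct foo_s *';
    # keep 'foo' alive for as long as any returned pointer is in use.
    foo = ffi.new("struct foo_s *")
    p_struct = ffi.addressof(foo[0])        # address of the struct itself
    p_field = ffi.addressof(foo[0], "a")    # address of the field 'a'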
From noreply at buildbot.pypy.org Mon Jul 29 09:33:03 2013 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 29 Jul 2013 09:33:03 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: revert setting cppflags and ldflagsto original appraoch, this doesnt break setuptools monkeypatching distutils Message-ID: <20130729073303.853221C32B4@cobra.cs.uni-duesseldorf.de> Author: Paweł Piotr Przeradowski Branch: release-2.1.x Changeset: r65766:522f7674a168 Date: 2013-07-28 01:08 +0200 http://bitbucket.org/pypy/pypy/changeset/522f7674a168/ Log: revert setting cppflags and ldflagsto original appraoch, this doesnt break setuptools monkeypatching distutils (transplanted from 9fad3a8b420858513f1356f3d82f6e4b7f377e6e) diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( From noreply at buildbot.pypy.org Mon Jul 29 09:33:04 2013 From: noreply at buildbot.pypy.org (bivab) Date: Mon, 29 Jul 2013 09:33:04 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: bump version numbers Message-ID: <20130729073304.C94A21C32B4@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: release-2.1.x Changeset: r65767:6fdd7202b805 Date: 2013-07-29 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/6fdd7202b805/ Log: bump version numbers diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ 
b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.3" /* PyPy version as a string */ -#define PYPY_VERSION "2.1.0-beta2" +#define PYPY_VERSION "2.1.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. */ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -11,7 +11,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 1, 0, "beta", 2) #XXX # sync patchlevel.h +PYPY_VERSION = (2, 1, 0, "final", 0) #XXX # sync patchlevel.h if platform.name == 'msvc': COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600) From noreply at buildbot.pypy.org Mon Jul 29 10:29:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 10:29:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix 74ec2abeb333: in particular, the line Message-ID: <20130729082913.56B811C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65768:a89ed91dc553 Date: 2013-07-29 10:28 +0200 http://bitbucket.org/pypy/pypy/changeset/a89ed91dc553/ Log: Fix 74ec2abeb333: in particular, the line if infobits | T_HAS_GCPTR_IN_VARSIZE... is equivalent to "if True", so the shortcut was never taken. diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -991,9 +991,12 @@ # after a minor or major collection, no object should be in the nursery ll_assert(not self.is_in_nursery(obj), "object in nursery after collection") - # similarily, all objects should have this flag: - ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, - "missing GCFLAG_TRACK_YOUNG_PTRS") + # similarily, all objects should have this flag, except if they + # don't have any GC pointer + typeid = self.get_type_id(obj) + if self.has_gcptr(typeid): + ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, + "missing GCFLAG_TRACK_YOUNG_PTRS") # the GCFLAG_VISITED should not be set between collections ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0, "unexpected GCFLAG_VISITED") diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py --- a/rpython/memory/gctypelayout.py +++ b/rpython/memory/gctypelayout.py @@ -203,6 +203,8 @@ offsets = offsets_to_gc_pointers(TYPE) infobits = index info.ofstoptrs = builder.offsets2table(offsets, TYPE) + if len(offsets) > 0: + infobits |= T_HAS_GCPTR # fptrs = builder.special_funcptr_for_type(TYPE) if fptrs: @@ -216,7 +218,7 @@ infobits |= T_HAS_FINALIZER | T_HAS_LIGHTWEIGHT_FINALIZER if "custom_trace" in fptrs: extra.customtracer = fptrs["custom_trace"] - infobits |= T_HAS_CUSTOM_TRACE + infobits |= T_HAS_CUSTOM_TRACE | T_HAS_GCPTR info.extra = extra # if not TYPE._is_varsize(): @@ -249,15 +251,13 @@ else: offsets = () if len(offsets) > 0: - infobits |= T_HAS_GCPTR_IN_VARSIZE + infobits |= T_HAS_GCPTR_IN_VARSIZE | T_HAS_GCPTR varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF) varinfo.varitemsize = llmemory.sizeof(ARRAY.OF) if builder.is_weakref_type(TYPE): infobits |= T_IS_WEAKREF if is_subclass_of_object(TYPE): infobits |= T_IS_RPYTHON_INSTANCE - if infobits | T_HAS_GCPTR_IN_VARSIZE or offsets: - infobits |= T_HAS_GCPTR info.infobits = infobits | T_KEY_VALUE # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Jul 29 11:28:59 2013 From: noreply at 
buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:28:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: remove a jump Message-ID: <20130729092859.6ACF41C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65769:646270f9f8db Date: 2013-07-26 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/646270f9f8db/ Log: remove a jump diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2232,7 +2232,7 @@ mc.CMP_rb(X86_64_SCRATCH_REG.value, StmGC.H_REVISION) else: mc.CMP(X86_64_SCRATCH_REG, mem(loc_base, StmGC.H_REVISION)) - + # if isinstance(descr, STMReadBarrierDescr): # jump to end if h_rev==priv_rev mc.J_il8(rx86.Conditions['Z'], 0) # patched below @@ -2268,19 +2268,15 @@ mc.TEST8_bi(StmGC.H_TID + off, flag) else: mc.TEST8_mi((loc_base.value, StmGC.H_TID + off), flag) - mc.J_il8(rx86.Conditions['NZ'], 0) # patched below - jnz_location2 = mc.get_relative_pos() - - # jump to end - mc.JMP_l8(0) # patched below + + mc.J_il8(rx86.Conditions['Z'], 0) # patched below jz_location = mc.get_relative_pos() + # both conditions succeeded, jump to end # jump target slowpath: offset = mc.get_relative_pos() - jnz_location - offset2 = mc.get_relative_pos() - jnz_location2 assert 0 < offset <= 127 mc.overwrite(jnz_location - 1, chr(offset)) - mc.overwrite(jnz_location2 - 1, chr(offset2)) # # SLOWPATH_START # From noreply at buildbot.pypy.org Mon Jul 29 11:29:00 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:29:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: attempt to implement ptr_eq fastpath Message-ID: <20130729092900.CA88D1C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65770:6317510aa84e Date: 2013-07-29 08:38 +0200 http://bitbucket.org/pypy/pypy/changeset/6317510aa84e/ Log: attempt to implement ptr_eq fastpath diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -395,7 +395,7 @@ addr = int(m.group(1), 16) addrs.setdefault(addr, []).append(name) dumps = {} - executables = set() + executables = set(["??"]) symbols = {} for entry in extract_category(log, 'jit-backend-dump'): entry = purge_thread_numbers(entry) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1042,7 +1042,6 @@ assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.TEST_rr(eax.value, eax.value) self.mc.SET_ir(rx86.Conditions['NZ'], rl.value) self.mc.MOVZX8_rr(result_loc.value, rl.value) @@ -1052,7 +1051,6 @@ assert self.cpu.gc_ll_descr.stm rl = result_loc.lowest8bits() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.TEST_rr(eax.value, eax.value) self.mc.SET_ir(rx86.Conditions['Z'], rl.value) self.mc.MOVZX8_rr(result_loc.value, rl.value) @@ -1064,7 +1062,6 @@ assert not self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.TEST_rr(eax.value, eax.value) if guard_opnum == rop.GUARD_FALSE: self.implement_guard(guard_token, "Z") else: @@ -1078,7 +1075,6 @@ assert not self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) - self.mc.TEST_rr(eax.value, eax.value) if guard_opnum 
== rop.GUARD_FALSE: self.implement_guard(guard_token, "NZ") else: @@ -2173,6 +2169,34 @@ assert self.ptr_eq_slowpath is not None a_base = arglocs[0] b_base = arglocs[1] + + # + # FASTPATH + # + # a == b -> SET NZ + sl = X86_64_SCRATCH_REG.lowest8bits() + mc.MOV(X86_64_SCRATCH_REG, a_base) + mc.CMP(X86_64_SCRATCH_REG, b_base) + mc.SET_ir(rx86.Conditions['Z'], sl.value) + mc.MOVZX8_rr(X86_64_SCRATCH_REG.value, sl.value) + mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + mc.J_il8(rx86.Conditions['NZ'], 0) + j_ok1 = mc.get_relative_pos() + + # a == 0 || b == 0 -> SET Z + mc.CMP(a_base, imm(0)) + mc.J_il8(rx86.Conditions['Z'], 0) + j_ok2 = mc.get_relative_pos() + # + mc.CMP(a_base, imm(0)) + mc.J_il8(rx86.Conditions['Z'], 0) + j_ok3 = mc.get_relative_pos() + + # a.type != b.type + # XXX: todo, if it ever happens.. + + # + # SLOWPATH # mc.PUSH(b_base) mc.PUSH(a_base) @@ -2180,7 +2204,22 @@ mc.CALL(imm(func)) # result still on stack assert isinstance(result_loc, RegLoc) - mc.POP_r(result_loc.value) + mc.POP_r(X86_64_SCRATCH_REG.value) + # set flags: + mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + # + # END SLOWPATH + # + + # OK: flags already set + offset = mc.get_relative_pos() - j_ok1 + mc.overwrite(j_ok1 - 1, chr(offset)) + offset = mc.get_relative_pos() - j_ok2 + mc.overwrite(j_ok2 - 1, chr(offset)) + offset = mc.get_relative_pos() - j_ok3 + mc.overwrite(j_ok3 - 1, chr(offset)) + + def _get_stm_private_rev_num_addr(self): assert self.cpu.gc_ll_descr.stm From noreply at buildbot.pypy.org Mon Jul 29 11:29:02 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:29:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: Merge Message-ID: <20130729092902.92BEF1C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65771:ed40f1172b80 Date: 2013-07-29 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/ed40f1172b80/ Log: Merge diff --git a/rpython/memory/gc/stmgc.py b/rpython/memory/gc/stmgc.py --- a/rpython/memory/gc/stmgc.py +++ b/rpython/memory/gc/stmgc.py @@ -44,15 +44,16 @@ GCFLAG_PREBUILT_ORIGINAL = first_gcflag << 3 GCFLAG_PUBLIC_TO_PRIVATE = first_gcflag << 4 GCFLAG_WRITE_BARRIER = first_gcflag << 5 # stmgc.h - GCFLAG_NURSERY_MOVED = first_gcflag << 6 + GCFLAG_MOVED = first_gcflag << 6 GCFLAG_BACKUP_COPY = first_gcflag << 7 # debug GCFLAG_STUB = first_gcflag << 8 # debug GCFLAG_PRIVATE_FROM_PROTECTED = first_gcflag << 9 GCFLAG_HAS_ID = first_gcflag << 10 GCFLAG_IMMUTABLE = first_gcflag << 11 GCFLAG_SMALLSTUB = first_gcflag << 12 + GCFLAG_MARKED = first_gcflag << 13 - PREBUILT_FLAGS = first_gcflag * (1 + 2 + 4 + 8) + PREBUILT_FLAGS = first_gcflag * ((1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<13)) PREBUILT_REVISION = r_uint(1) FX_MASK = 65535 @@ -109,7 +110,7 @@ # XXX finalizers are ignored for now #ll_assert(not needs_finalizer, 'XXX needs_finalizer') #ll_assert(not is_finalizer_light, 'XXX is_finalizer_light') - #ll_assert(not contains_weakptr, 'XXX contains_weakptr') + ll_assert(not contains_weakptr, 'contains_weakptr: use malloc_weakref') # XXX call optimized versions, e.g. if size < GC_NURSERY_SECTION return llop.stm_allocate(llmemory.GCREF, size, typeid16) @@ -131,12 +132,14 @@ seen by the GC, then it can get collected.""" tid = self.get_hdr_tid(obj)[0] if bool(tid & self.GCFLAG_OLD): - return False + return False # XXX wrong so far. 
We should add a flag to the + # object that means "don't ever kill this copy" return True @classmethod def JIT_max_size_of_young_obj(cls): + # XXX there is actually a maximum, check return None @classmethod diff --git a/rpython/translator/stm/src_stm/dbgmem.c b/rpython/translator/stm/src_stm/dbgmem.c --- a/rpython/translator/stm/src_stm/dbgmem.c +++ b/rpython/translator/stm/src_stm/dbgmem.c @@ -9,7 +9,7 @@ #ifdef _GC_DEBUG /************************************************************/ -#define MMAP_TOTAL 671088640 /* 640MB */ +#define MMAP_TOTAL 1280*1024*1024 /* 1280MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; static char *zone_start, *zone_current = NULL, *zone_end = NULL; @@ -71,6 +71,10 @@ void stm_free(void *p, size_t sz) { + if (p == NULL) { + assert(sz == 0); + return; + } assert(((intptr_t)((char *)p + sz) & (PAGE_SIZE-1)) == 0); size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; @@ -84,6 +88,14 @@ _stm_dbgmem(p, sz, PROT_NONE); } +void *stm_realloc(void *p, size_t newsz, size_t oldsz) +{ + void *r = stm_malloc(newsz); + memcpy(r, p, oldsz < newsz ? oldsz : newsz); + stm_free(p, oldsz); + return r; +} + int _stm_can_access_memory(char *p) { long base = ((char *)p - zone_start) / PAGE_SIZE; diff --git a/rpython/translator/stm/src_stm/dbgmem.h b/rpython/translator/stm/src_stm/dbgmem.h --- a/rpython/translator/stm/src_stm/dbgmem.h +++ b/rpython/translator/stm/src_stm/dbgmem.h @@ -7,6 +7,7 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); +void *stm_realloc(void *, size_t, size_t); int _stm_can_access_memory(char *); void assert_cleared(char *, size_t); @@ -14,6 +15,7 @@ #define stm_malloc(sz) malloc(sz) #define stm_free(p,sz) free(p) +#define stm_realloc(p,newsz,oldsz) realloc(p,newsz) #define assert_cleared(p,sz) do { } while(0) #endif diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c --- a/rpython/translator/stm/src_stm/et.c +++ b/rpython/translator/stm/src_stm/et.c @@ -146,7 +146,7 @@ gcptr P_prev = P; P = (gcptr)v; assert((P->h_tid & GCFLAG_PUBLIC) || - (P_prev->h_tid & GCFLAG_NURSERY_MOVED)); + (P_prev->h_tid & GCFLAG_MOVED)); v = ACCESS_ONCE(P->h_revision); @@ -238,7 +238,7 @@ add_in_recent_reads_cache: /* The risks are that the following assert fails, because the flag was added just now by a parallel thread during stealing... */ - /*assert(!(P->h_tid & GCFLAG_NURSERY_MOVED));*/ + /*assert(!(P->h_tid & GCFLAG_MOVED));*/ fxcache_add(&d->recent_reads_cache, P); return P; @@ -281,7 +281,7 @@ */ if (P->h_tid & GCFLAG_PUBLIC) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) { P = (gcptr)P->h_revision; assert(P->h_tid & GCFLAG_PUBLIC); @@ -413,7 +413,7 @@ while (v = P->h_revision, IS_POINTER(v)) { - if (P->h_tid & GCFLAG_NURSERY_MOVED) + if (P->h_tid & GCFLAG_MOVED) dprintf(("nursery_moved ")); if (v & 2) @@ -510,7 +510,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { assert(R->h_tid & GCFLAG_PUBLIC); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); #ifdef _GC_DEBUG wlog_t *entry; @@ -570,6 +570,13 @@ gcptr stm_WriteBarrier(gcptr P) { assert(!(P->h_tid & GCFLAG_IMMUTABLE)); + assert((P->h_tid & GCFLAG_STUB) || + stmgc_size(P) > sizeof(struct stm_stub_s) - WORD); + /* If stmgc_size(P) gives a number <= sizeof(stub)-WORD, then there is a + risk of overrunning the object later in gcpage.c when copying a stub + over it. However such objects are so small that they contain no field + at all, and so no write barrier should occur on them. 
*/ + if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into @@ -606,7 +613,7 @@ Add R into the list 'public_with_young_copy', unless W is actually an old object, in which case we need to record W. */ - if (R->h_tid & GCFLAG_NURSERY_MOVED) + if (R->h_tid & GCFLAG_MOVED) { /* Bah, the object turned into this kind of stub, possibly while we were waiting for the collection_lock, because it @@ -696,8 +703,8 @@ continue; } } - else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) - == (GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED)) + else if ((R->h_tid & (GCFLAG_PUBLIC | GCFLAG_MOVED)) + == (GCFLAG_PUBLIC | GCFLAG_MOVED)) { /* such an object is identical to the one it points to (stolen protected young object with h_revision pointing @@ -970,6 +977,7 @@ revision_t my_lock = d->my_lock; wlog_t *item; + dprintf(("acquire_locks\n")); assert(!stm_has_got_any_lock(d)); assert(d->public_descriptor->stolen_objects.size == 0); @@ -982,6 +990,7 @@ revision_t v; retry: assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); v = ACCESS_ONCE(R->h_revision); if (IS_POINTER(v)) /* "has a more recent revision" */ { @@ -1014,7 +1023,7 @@ static void CancelLocks(struct tx_descriptor *d) { wlog_t *item; - + dprintf(("cancel_locks\n")); if (!g2l_any_entry(&d->public_to_private)) return; @@ -1107,7 +1116,7 @@ assert(!(L->h_tid & GCFLAG_VISITED)); assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(L->h_tid & GCFLAG_MOVED)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -1119,7 +1128,9 @@ gcptr stub = stm_stub_malloc(d->public_descriptor, 0); stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB + | GCFLAG_SMALLSTUB | GCFLAG_OLD; + dprintf(("et.c: stm_stub_malloc -> %p\n", stub)); stub->h_revision = ((revision_t)L) | 2; assert(!(L->h_tid & GCFLAG_HAS_ID)); @@ -1154,7 +1165,7 @@ assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(R->h_tid & GCFLAG_MOVED)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -1249,7 +1260,7 @@ assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); P->h_tid |= GCFLAG_PUBLIC; assert(!(P->h_tid & GCFLAG_HAS_ID)); - if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_NURSERY_MOVED; + if (!(P->h_tid & GCFLAG_OLD)) P->h_tid |= GCFLAG_MOVED; /* P becomes a public outdated object. It may create an exception documented in doc-objects.txt: a public but young object. It's still fine because it should only be seen by @@ -1282,7 +1293,7 @@ revision_t cur_time; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - + dprintf(("CommitTransaction(%p)\n", d)); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); @@ -1366,6 +1377,7 @@ d->active = 2; d->reads_size_limit_nonatomic = 0; update_reads_size_limit(d); + dprintf(("make_inevitable(%p)\n", d)); } static revision_t acquire_inev_mutex_and_mark_global_cur_time( diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h --- a/rpython/translator/stm/src_stm/et.h +++ b/rpython/translator/stm/src_stm/et.h @@ -26,7 +26,11 @@ * * GCFLAG_OLD is set on old objects. * - * GCFLAG_VISITED is used temporarily during major collections. + * GCFLAG_VISITED and GCFLAG_MARKED are used temporarily during major + * collections. 
The objects are MARKED|VISITED as soon as they have been + * added to 'objects_to_trace', and so will be or have been traced. The + * objects are only MARKED if their memory must be kept alive, but (so far) + * we found that tracing them is not useful. * * GCFLAG_PUBLIC is set on public objects. * @@ -47,7 +51,7 @@ * the list 'old_objects_to_trace'; it is set again at the next minor * collection. * - * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. + * GCFLAG_MOVED is used temporarily during minor/major collections. * * GCFLAG_STUB is set for debugging on stub objects made by stealing or * by major collections. 'p_stub->h_revision' might be a value @@ -68,16 +72,20 @@ static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; // in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; -static const revision_t GCFLAG_NURSERY_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6; static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10; static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11; +static const revision_t GCFLAG_SMALLSTUB /*debug*/ = STM_FIRST_GCFLAG << 12; +static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13; +/* warning, the last flag available is "<< 15" on 32-bit */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ + GCFLAG_MARKED | \ GCFLAG_PREBUILT_ORIGINAL | \ GCFLAG_OLD | \ GCFLAG_PUBLIC) @@ -88,12 +96,14 @@ "PREBUILT_ORIGINAL", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ - "NURSERY_MOVED", \ + "MOVED", \ "BACKUP_COPY", \ "STUB", \ "PRIVATE_FROM_PROTECTED", \ - "HAS_ID", \ - "IMMUTABLE", \ + "HAS_ID", \ + "IMMUTABLE", \ + "SMALLSTUB", \ + "MARKED", \ NULL } #define IS_POINTER(v) (!((v) & 1)) /* even-valued number */ diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c --- a/rpython/translator/stm/src_stm/extra.c +++ b/rpython/translator/stm/src_stm/extra.c @@ -132,17 +132,16 @@ _Bool stm_pointer_equal(gcptr p1, gcptr p2) { - /* fast path for two equal pointers */ - if (p1 == p2) - return 1; - /* if p1 or p2 is NULL (but not both, because they are different - pointers), then return 0 */ - if (p1 == NULL || p2 == NULL) - return 0; - /* types must be the same */ - if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK)) - return 0; - return stm_id(p1) == stm_id(p2); + if (p1 != NULL && p2 != NULL) { + /* resolve h_original, but only if !PREBUILT_ORIGINAL */ + if (p1->h_original && !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p1 = (gcptr)p1->h_original; + } + if (p2->h_original && !(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + p2 = (gcptr)p2->h_original; + } + } + return (p1 == p2); } /************************************************************/ diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c --- a/rpython/translator/stm/src_stm/gcpage.c +++ b/rpython/translator/stm/src_stm/gcpage.c @@ -213,157 +213,229 @@ static struct GcPtrList objects_to_trace; -static void keep_original_alive(gcptr obj) +static gcptr copy_over_original(gcptr obj, gcptr id_copy) { - /* keep alive the original of a visited object */ - gcptr id_copy = 
(gcptr)obj->h_original; - /* prebuilt original objects may have a predifined - hash in h_original */ - if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; - /* see fix_outdated() */ - if (!(id_copy->h_tid & GCFLAG_VISITED)) { - id_copy->h_tid |= GCFLAG_VISITED; + assert(obj != id_copy); + assert(id_copy == (gcptr)obj->h_original); + assert(!(id_copy->h_revision & 1)); /* not head-revision itself */ - /* XXX: may not always need tracing? */ - if (!(id_copy->h_tid & GCFLAG_STUB)) - gcptrlist_insert(&objects_to_trace, id_copy); - } - } - else { - /* prebuilt originals won't get collected anyway - and if they are not reachable in any other way, - we only ever need their location, not their content */ + /* check a few flags */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + + assert(id_copy->h_tid & GCFLAG_PUBLIC); + assert(!(id_copy->h_tid & GCFLAG_BACKUP_COPY)); + + /* id_copy may be a stub, but in this case, as the original, it + should have been allocated with a big enough chunk of memory. + Also, obj itself might be a stub. */ + assert(!(id_copy->h_tid & GCFLAG_SMALLSTUB)); + if (!(id_copy->h_tid & GCFLAG_STUB) && !(obj->h_tid & GCFLAG_STUB)) { + assert(stmgc_size(id_copy) == stmgc_size(obj)); + } + + /* add the MOVED flag to 'obj' */ + obj->h_tid |= GCFLAG_MOVED; + + /* copy the object's content */ + size_t objsize; + if (obj->h_tid & GCFLAG_STUB) + objsize = sizeof(struct stm_stub_s); + else { + objsize = stmgc_size(obj); + assert(objsize > sizeof(struct stm_stub_s) - WORD); + } + dprintf(("copy %p over %p (%zd bytes)\n", obj, id_copy, objsize)); + memcpy(id_copy + 1, obj + 1, objsize - sizeof(struct stm_object_s)); + + /* copy the object's h_revision number */ + id_copy->h_revision = obj->h_revision; + + /* copy the STUB flag */ + id_copy->h_tid &= ~GCFLAG_STUB; + id_copy->h_tid |= (obj->h_tid & GCFLAG_STUB); + + return id_copy; +} + +static void visit_nonpublic(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* Visit a protected or private object. 'gcp' must be either NULL or + point to the thread that has got the object. This 'gcp' is only an + optimization: it lets us trace (most) private/protected objects + and replace pointers to public objects in them with pointers to + private/protected objects if they are the most recent ones, + provided they belong to the same thread. + */ + assert(!(obj->h_tid & GCFLAG_PUBLIC)); + assert(!(obj->h_tid & GCFLAG_STUB)); + assert(!(obj->h_tid & GCFLAG_HAS_ID)); + assert(!(obj->h_tid & GCFLAG_SMALLSTUB)); + assert(!(obj->h_tid & GCFLAG_MOVED)); + + if (obj->h_tid & GCFLAG_VISITED) + return; /* already visited */ + + obj->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + gcptrlist_insert2(&objects_to_trace, obj, (gcptr)gcp); + + obj = (gcptr)obj->h_original; + if (obj != NULL) + obj->h_tid |= GCFLAG_MARKED; +} + +static gcptr visit_public(gcptr obj, struct tx_public_descriptor *gcp) +{ + /* The goal is to walk to the most recent copy, then copy its + content back into the h_original, and finally returns this + h_original. Or, if gcp != NULL and the most recent copy is + protected by precisely 'gcp', then we return it instead. 
+ */ + assert(obj->h_tid & GCFLAG_PUBLIC); + assert(!(obj->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + + gcptr original; + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + original = (gcptr)obj->h_original; + /* the h_original may be protected, or private_from_protected, + in some cases. Then we can't use it. We'll use the most + recent h_revision which is public. */ + if (!(original->h_tid & GCFLAG_PUBLIC)) { + original->h_tid |= GCFLAG_MARKED; + original = NULL; } } + else + original = obj; + + /* the original object must not be a small stub. */ + assert(original == NULL || !(original->h_tid & GCFLAG_SMALLSTUB)); + + /* if 'original' was already visited, we are done */ + if (original != NULL && original->h_tid & GCFLAG_VISITED) + return original; + + /* walk to the head of the chained list */ + while (IS_POINTER(obj->h_revision)) { + if (!(obj->h_revision & 2)) { + obj = (gcptr)obj->h_revision; + assert(obj->h_tid & GCFLAG_PUBLIC); + continue; + } + + /* it's a stub: check the current stealing status */ + assert(obj->h_tid & GCFLAG_STUB); + gcptr obj2 = (gcptr)(obj->h_revision - 2); + + if (obj2->h_tid & GCFLAG_PUBLIC) { + /* the stub target itself was stolen, so is public now. + Continue looping from there. */ + obj = obj2; + continue; + } + + if (obj2->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* the stub target is a private_from_protected. */ + gcptr obj3 = (gcptr)obj2->h_revision; + if (obj3->h_tid & GCFLAG_PUBLIC) { + assert(!(obj3->h_tid & GCFLAG_BACKUP_COPY)); + /* the backup copy was stolen and is now a regular + public object. */ + obj = obj3; + continue; + } + else { + /* the backup copy was not stolen. Ignore this pair + obj2/obj3, and the head of the public chain is obj. + The pair obj2/obj3 was or will be handled by + mark_all_stack_roots(). */ + assert(obj3->h_tid & GCFLAG_BACKUP_COPY); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + else { + /* the stub target is just a protected object. + The head of the public chain is obj. We have to + explicitly keep obj2 alive. */ + assert(!IS_POINTER(obj2->h_revision)); + visit_nonpublic(obj2, STUB_THREAD(obj)); + + assert(STUB_THREAD(obj) != NULL); + if (STUB_THREAD(obj) == gcp) + return obj2; + break; + } + } + + /* at this point, 'obj' contains the most recent revision which is + public. */ + if (original == NULL) { + original = obj; + if (original->h_tid & GCFLAG_VISITED) + return original; + } + else if (obj != original) { + /* copy obj over original */ + copy_over_original(obj, original); + } + + /* return this original */ + original->h_tid |= GCFLAG_VISITED | GCFLAG_MARKED; + if (!(original->h_tid & GCFLAG_STUB)) + gcptrlist_insert2(&objects_to_trace, original, NULL); + return original; } -static void visit(gcptr *pobj); +static struct tx_public_descriptor *visit_protected_gcp; -gcptr stmgcpage_visit(gcptr obj) +static void visit_take_protected(gcptr *pobj) { - visit(&obj); - return obj; -} - -static void visit(gcptr *pobj) -{ + /* Visits '*pobj', marking it as surviving and possibly adding it to + objects_to_trace. Fixes *pobj to point to the exact copy that + survived. This function will replace *pobj with a protected + copy if it belongs to the thread 'visit_protected_gcp', so the + latter must be initialized before any call! 
+ */ gcptr obj = *pobj; if (obj == NULL) return; - restart: - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - keep_original_alive(obj); - } - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - gcptr prev_obj = obj; - - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub: keep it if it points to a protected version, - because we need to keep the effect of stealing if it is - later accessed by the wrong thread. If it points to a - public object (possibly outdated), we can ignore the stub. - */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - if (!(obj->h_tid & GCFLAG_PUBLIC)) { - prev_obj->h_tid |= GCFLAG_VISITED; - keep_original_alive(prev_obj); - - assert(*pobj == prev_obj); - /* recursion, but should be only once */ - obj = stmgcpage_visit(obj); - assert(prev_obj->h_tid & GCFLAG_STUB); - prev_obj->h_revision = ((revision_t)obj) | 2; - return; - } - } - - if (!(obj->h_revision & 3)) { - /* obj is neither a stub nor a most recent revision: - completely ignore obj->h_revision */ - - obj = (gcptr)obj->h_revision; - assert(obj->h_tid & GCFLAG_PUBLIC); - prev_obj->h_revision = (revision_t)obj; - } - *pobj = obj; - goto restart; - } - else if (obj->h_tid & GCFLAG_VISITED) { - dprintf(("[already visited: %p]\n", obj)); - assert(obj == *pobj); - assert((obj->h_revision & 3) || /* either odd, or stub */ - (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - return; /* already seen */ + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + /* 'obj' is a private or protected copy. 
*/ + visit_nonpublic(obj, visit_protected_gcp); } else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - - if (obj->h_original && (gcptr)obj->h_original != B) { - /* if B is original, it will be visited anyway */ - assert(obj->h_original == B->h_original); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - keep_original_alive(obj); - } - - obj->h_tid |= GCFLAG_VISITED; - B->h_tid |= GCFLAG_VISITED; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - gcptrlist_insert2(&objects_to_trace, obj, B); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - pobj = (gcptr *)&B->h_revision; - obj = *pobj; - goto restart; - } + *pobj = visit_public(obj, visit_protected_gcp); } } - -static void visit_keep(gcptr obj) +gcptr stmgcpage_visit(gcptr obj) { - if (!(obj->h_tid & GCFLAG_VISITED)) { - obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */ - obj->h_tid |= GCFLAG_VISITED; - gcptrlist_insert(&objects_to_trace, obj); - - if (IS_POINTER(obj->h_revision)) { - assert(!(obj->h_revision & 2)); - visit((gcptr *)&obj->h_revision); - } - keep_original_alive(obj); + if (!(obj->h_tid & GCFLAG_PUBLIC)) { + visit_nonpublic(obj, NULL); } + else { + obj = visit_public(obj, NULL); + } + return obj; } static void visit_all_objects(void) { while (gcptrlist_size(&objects_to_trace) > 0) { + visit_protected_gcp = + (struct tx_public_descriptor *)gcptrlist_pop(&objects_to_trace); gcptr obj = gcptrlist_pop(&objects_to_trace); - stmgc_trace(obj, &visit); + stmgc_trace(obj, &visit_take_protected); } + visit_protected_gcp = NULL; } static void mark_prebuilt_roots(void) @@ -371,18 +443,20 @@ /* Note about prebuilt roots: 'stm_prebuilt_gcroots' is a list that contains all the ones that have been modified. Because they are themselves not in any page managed by this file, their - GCFLAG_VISITED will not be removed at the end of the current - collection. This is fine because the base object cannot contain - references to the heap. So we decided to systematically set - GCFLAG_VISITED on prebuilt objects. */ + GCFLAG_VISITED is not removed at the end of the current + collection. That's why we remove it here. GCFLAG_MARKED is not + relevant for prebuilt objects, but we avoid objects with MARKED + but not VISITED, which trigger some asserts. 
*/ gcptr *pobj = stm_prebuilt_gcroots.items; gcptr *pend = stm_prebuilt_gcroots.items + stm_prebuilt_gcroots.size; - gcptr obj; + gcptr obj, obj2; for (; pobj != pend; pobj++) { obj = *pobj; + obj->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); assert(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL); - assert(IS_POINTER(obj->h_revision)); - visit((gcptr *)&obj->h_revision); + + obj2 = visit_public(obj, NULL); + assert(obj2 == obj); /* it is its own original */ } } @@ -396,7 +470,7 @@ if (((revision_t)item) & ~((revision_t)END_MARKER_OFF | (revision_t)END_MARKER_ON)) { /* 'item' is a regular, non-null pointer */ - visit(root); + visit_take_protected(root); dprintf(("visit stack root: %p -> %p\n", item, *root)); } else if (item == END_MARKER_OFF) { @@ -409,15 +483,19 @@ static void mark_all_stack_roots(void) { struct tx_descriptor *d; + struct GcPtrList new_public_to_private; + memset(&new_public_to_private, 0, sizeof(new_public_to_private)); + for (d = stm_tx_head; d; d = d->tx_next) { assert(!stm_has_got_any_lock(d)); + visit_protected_gcp = d->public_descriptor; /* the roots pushed on the shadowstack */ mark_roots(d->shadowstack, *d->shadowstack_end_ref); /* the thread-local object */ - visit(d->thread_local_obj_ref); - visit(&d->old_thread_local_obj); + visit_take_protected(d->thread_local_obj_ref); + visit_take_protected(&d->old_thread_local_obj); /* the current transaction's private copies of public objects */ wlog_t *item; @@ -427,37 +505,50 @@ gcptr R = item->addr; gcptr L = item->val; - /* Objects that were not visited yet must have the PUB_TO_PRIV - flag. Except if that transaction will abort anyway, then it - may be removed from a previous major collection that didn't - fix the PUB_TO_PRIV because the transaction was going to - abort anyway: - 1. minor_collect before major collect (R->L, R is outdated, abort) - 2. major collect removes flag - 3. major collect again, same thread, no time to abort - 4. flag still removed - */ - assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0, - R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - visit_keep(R); + /* we visit the public object R. Must keep a public object + here, so we pass NULL as second argument. */ + gcptr new_R = visit_public(R, NULL); + assert(new_R->h_tid & GCFLAG_PUBLIC); + + if (new_R != R) { + /* we have to update the key in public_to_private, which + can only be done by deleting the existing key and + (after the loop) re-inserting the new key. */ + G2L_LOOP_DELETE(item); + gcptrlist_insert2(&new_public_to_private, new_R, L); + } + + /* we visit the private copy L --- which at this point + should be private, possibly private_from_protected, + so visit() should return the same private copy */ if (L != NULL) { - /* minor collection found R->L in public_to_young - and R was modified. It then sets item->val to NULL and wants - to abort later. */ - revision_t v = L->h_revision; - visit_keep(L); - /* a bit of custom logic here: if L->h_revision used to - point exactly to R, as set by stealing, then we must - keep this property, even though visit_keep(L) might - decide it would be better to make it point to a more - recent copy. 
*/ - if (v == (revision_t)R) { - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - L->h_revision = v; /* restore */ - } + visit_nonpublic(L, visit_protected_gcp); } + } G2L_LOOP_END; + /* reinsert to real pub_to_priv */ + long i, size = new_public_to_private.size; + gcptr *items = new_public_to_private.items; + for (i = 0; i < size; i += 2) { + g2l_insert(&d->public_to_private, items[i], items[i + 1]); + } + gcptrlist_clear(&new_public_to_private); + + /* the current transaction's private copies of protected objects */ + items = d->private_from_protected.items; + for (i = d->private_from_protected.size - 1; i >= 0; i--) { + gcptr obj = items[i]; + assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + visit_nonpublic(obj, visit_protected_gcp); + + gcptr backup_obj = (gcptr)obj->h_revision; + if (!(backup_obj->h_tid & GCFLAG_PUBLIC)) + visit_nonpublic(backup_obj, visit_protected_gcp); + else + obj->h_revision = (revision_t)visit_public(backup_obj, NULL); + } + /* make sure that the other lists are empty */ assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); @@ -473,27 +564,16 @@ assert(gcptrlist_size(&d->private_from_protected) == d->num_private_from_protected_known_old); } + + visit_protected_gcp = NULL; + gcptrlist_delete(&new_public_to_private); } static void cleanup_for_thread(struct tx_descriptor *d) { long i; gcptr *items; - - /* It can occur that 'private_from_protected' contains an object that - * has not been visited at all (maybe only in inevitable - * transactions). - */ - items = d->private_from_protected.items; - for (i = d->private_from_protected.size - 1; i >= 0; i--) { - gcptr obj = items[i]; - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - - if (!(obj->h_tid & GCFLAG_VISITED)) { - /* forget 'obj' */ - items[i] = items[--d->private_from_protected.size]; - } - } + assert(d->old_objects_to_trace.size == 0); /* If we're aborting this transaction anyway, we don't need to do * more here. @@ -516,21 +596,29 @@ items = d->list_of_read_objects.items; for (i = d->list_of_read_objects.size - 1; i >= 0; --i) { gcptr obj = items[i]; - assert(!(obj->h_tid & GCFLAG_STUB)); - /* Warning: in case the object listed is outdated and has been - replaced with a more recent revision, then it might be the - case that obj->h_revision doesn't have GCFLAG_VISITED, but - just removing it is very wrong --- we want 'd' to abort. - */ - if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + if (obj->h_tid & GCFLAG_MOVED) { + assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(IS_POINTER(obj->h_original)); + obj = (gcptr)obj->h_original; + items[i] = obj; + } + else if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* Warning: in case the object listed is outdated and has been + replaced with a more recent revision, then it might be the + case that obj->h_revision doesn't have GCFLAG_VISITED, but + just removing it is very wrong --- we want 'd' to abort. + */ /* follow obj to its backup */ assert(IS_POINTER(obj->h_revision)); obj = (gcptr)obj->h_revision; + + /* the backup-ptr should already be updated: */ + assert(!(obj->h_tid & GCFLAG_MOVED)); } revision_t v = obj->h_revision; - if (IS_POINTER(v)) { + if ((obj->h_tid & GCFLAG_STUB) || IS_POINTER(v)) { /* has a more recent revision. Oups. 
*/ dprintf(("ABRT_COLLECT_MAJOR %p: " "%p was read but modified already\n", d, obj)); @@ -572,7 +660,7 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { assert(item->addr->h_tid & GCFLAG_VISITED); assert(item->val->h_tid & GCFLAG_VISITED); - + assert(!(item->addr->h_tid & GCFLAG_MOVED)); assert(item->addr->h_tid & GCFLAG_PUBLIC); /* assert(is_private(item->val)); but in the other thread, which becomes: */ @@ -611,7 +699,9 @@ and the flag is removed; other locations are marked as free. */ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) break; /* first object that stays alive */ p = (gcptr)(((char *)p) + obj_size); } @@ -621,8 +711,10 @@ surviving_pages = lpage; p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); mc_total_in_use += obj_size; } else { @@ -648,6 +740,7 @@ p = (gcptr)(lpage + 1); for (j = 0; j < objs_per_page; j++) { assert(!(p->h_tid & GCFLAG_VISITED)); + assert(!(p->h_tid & GCFLAG_MARKED)); if (p->h_tid != DEBUG_WORD(0xDD)) { dprintf(("| freeing %p (with page %p)\n", p, lpage)); } @@ -677,8 +770,10 @@ G2L_LOOP_FORWARD(gcp->nonsmall_objects, item) { gcptr p = item->addr; - if (p->h_tid & GCFLAG_VISITED) { - p->h_tid &= ~GCFLAG_VISITED; + assert(IMPLIES(p->h_tid & GCFLAG_VISITED, + p->h_tid & GCFLAG_MARKED)); + if (p->h_tid & GCFLAG_MARKED) { + p->h_tid &= ~(GCFLAG_VISITED | GCFLAG_MARKED); } else { G2L_LOOP_DELETE(item); diff --git a/rpython/translator/stm/src_stm/lists.c b/rpython/translator/stm/src_stm/lists.c --- a/rpython/translator/stm/src_stm/lists.c +++ b/rpython/translator/stm/src_stm/lists.c @@ -19,7 +19,7 @@ void g2l_delete(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); memset(g2l, 0, sizeof(struct G2L)); } @@ -57,7 +57,7 @@ long alloc = g2l->raw_end - g2l->raw_start; long newalloc = (alloc + extra + (alloc >> 2) + 31) & ~15; //fprintf(stderr, "growth: %ld\n", newalloc); - char *newitems = malloc(newalloc); + char *newitems = stm_malloc(newalloc); newg2l.raw_start = newitems; newg2l.raw_current = newitems; newg2l.raw_end = newitems + newalloc; @@ -66,7 +66,7 @@ { g2l_insert(&newg2l, item->addr, item->val); } G2L_LOOP_END; - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); *g2l = newg2l; } @@ -152,7 +152,7 @@ //fprintf(stderr, "list %p deleted (%ld KB)\n", //gcptrlist, gcptrlist->alloc * sizeof(gcptr) / 1024); gcptrlist->size = 0; - free(gcptrlist->items); + stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = NULL; gcptrlist->alloc = 0; } @@ -163,7 +163,8 @@ return; size_t nsize = gcptrlist->size * sizeof(gcptr); - gcptr *newitems = realloc(gcptrlist->items, nsize); + gcptr *newitems = stm_realloc(gcptrlist->items, nsize, + gcptrlist->alloc * sizeof(gcptr)); if (newitems != NULL || nsize == 0) { gcptrlist->items = newitems; @@ -178,11 +179,11 @@ //fprintf(stderr, "list %p growth to %ld items (%ld KB)\n", // gcptrlist, newalloc, newalloc * sizeof(gcptr) / 1024); - gcptr *newitems = malloc(newalloc * sizeof(gcptr)); + gcptr *newitems = stm_malloc(newalloc * sizeof(gcptr)); long i; for (i=0; isize; i++) newitems[i] = gcptrlist->items[i]; - free(gcptrlist->items); + 
stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr)); gcptrlist->items = newitems; gcptrlist->alloc = newalloc; } diff --git a/rpython/translator/stm/src_stm/lists.h b/rpython/translator/stm/src_stm/lists.h --- a/rpython/translator/stm/src_stm/lists.h +++ b/rpython/translator/stm/src_stm/lists.h @@ -2,6 +2,8 @@ #ifndef _SRCSTM_LISTS_H #define _SRCSTM_LISTS_H +#include "dbgmem.h" + /************************************************************/ /* The g2l_xx functions ("global_to_local") are implemented as a tree, @@ -37,7 +39,7 @@ void g2l_clear(struct G2L *g2l); void g2l_delete(struct G2L *g2l); static inline void g2l_delete_not_used_any_more(struct G2L *g2l) { - free(g2l->raw_start); + stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start); } static inline int g2l_any_entry(struct G2L *g2l) { diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c --- a/rpython/translator/stm/src_stm/nursery.c +++ b/rpython/translator/stm/src_stm/nursery.c @@ -137,7 +137,7 @@ static inline gcptr create_old_object_copy(gcptr obj) { assert(!(obj->h_tid & GCFLAG_PUBLIC)); - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_MOVED)); assert(!(obj->h_tid & GCFLAG_VISITED)); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); @@ -160,9 +160,9 @@ } else { /* it's a nursery object. Was it already moved? */ - if (UNLIKELY(obj->h_tid & GCFLAG_NURSERY_MOVED)) { + if (UNLIKELY(obj->h_tid & GCFLAG_MOVED)) { /* yes. Such an object can be a public object in the nursery - too (such objects are always NURSERY_MOVED). For all cases, + too (such objects are always MOVED). For all cases, we can just fix the ref. Can be stolen objects or those we already moved. */ @@ -183,7 +183,7 @@ fresh_old_copy = create_old_object_copy(obj); } - obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_tid |= GCFLAG_MOVED; obj->h_revision = (revision_t)fresh_old_copy; /* fix the original reference */ @@ -233,8 +233,23 @@ assert(items[i]->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(IS_POINTER(items[i]->h_revision)); + /* if items[i] is young, move it, update the pointer, and + schedule the object for later consideration by + visit_all_outside_objects() (which will for example ensure + that the WRITE_BARRIER flag is added to it). + */ visit_if_young(&items[i]); + /* the backup copy is always allocated outside the nursery, + but we have to trace it as well, as it may contain its own + young pointers. + + but only once: if the transaction was running for long enough + to have num_private_from_protected_known_old > 0, then the + backup copies of known-old objects have already been traced + in a previous minor collection, and as they are read-only, + they cannot contain young pointers any more. + */ stmgc_trace((gcptr)items[i]->h_revision, &visit_if_young); } @@ -386,13 +401,13 @@ /* non-young or visited young objects are kept */ continue; } - else if (obj->h_tid & GCFLAG_NURSERY_MOVED) { + else if (obj->h_tid & GCFLAG_MOVED) { /* visited nursery objects are kept and updated */ items[i] = (gcptr)obj->h_revision; assert(!(items[i]->h_tid & GCFLAG_STUB)); continue; } - /* Sanity check: a nursery object without the NURSERY_MOVED flag + /* Sanity check: a nursery object without the MOVED flag is necessarily a private-without-backup object, or a protected object; it cannot be a public object. 
*/ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); @@ -432,7 +447,7 @@ setup_minor_collect(d); /* first do this, which asserts that some objects are private --- - which fails if they have already been GCFLAG_NURSERY_MOVED */ + which fails if they have already been GCFLAG_MOVED */ mark_public_to_young(d); mark_young_roots(d); diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -4cad3aa5a20b +c528da482152 diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c --- a/rpython/translator/stm/src_stm/steal.c +++ b/rpython/translator/stm/src_stm/steal.c @@ -31,7 +31,7 @@ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); if (obj->h_tid & GCFLAG_PUBLIC) { /* young public, replace with stolen old copy */ - assert(obj->h_tid & GCFLAG_NURSERY_MOVED); + assert(obj->h_tid & GCFLAG_MOVED); assert(IS_POINTER(obj->h_revision)); stub = (gcptr)obj->h_revision; assert(!IS_POINTER(stub->h_revision)); /* not outdated */ @@ -56,7 +56,7 @@ if (!(obj->h_original)) obj->h_original = (revision_t)O; } - obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC); + obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC); obj->h_revision = (revision_t)O; O->h_tid |= GCFLAG_PUBLIC; @@ -105,6 +105,8 @@ stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_OLD; + if (size == 0) + stub->h_tid |= GCFLAG_SMALLSTUB; stub->h_revision = ((revision_t)obj) | 2; if (obj->h_original) { stub->h_original = obj->h_original; @@ -206,7 +208,7 @@ /* note that we should follow h_revision at least one more step: it is necessary if L is public but young (and then - has GCFLAG_NURSERY_MOVED), but it is fine to do it more + has GCFLAG_MOVED), but it is fine to do it more generally. */ v = ACCESS_ONCE(L->h_revision); if (IS_POINTER(v)) { @@ -239,7 +241,7 @@ } L->h_revision = (revision_t)O; - L->h_tid |= GCFLAG_PUBLIC | GCFLAG_NURSERY_MOVED; + L->h_tid |= GCFLAG_PUBLIC | GCFLAG_MOVED; /* subtle: we need to remove L from the fxcache of the target thread, otherwise its read barrier might not trigger on it. It is mostly fine because it is anyway identical to O. 
But diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -24,7 +24,8 @@ #define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */ #define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID)) #define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1) -#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8)) +#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * ((1<<0) | (1<<1) | \ + (1<<2) | (1<<3) | (1<<13))) #define PREBUILT_REVISION 1 diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c --- a/rpython/translator/stm/src_stm/stmsync.c +++ b/rpython/translator/stm/src_stm/stmsync.c @@ -53,7 +53,7 @@ static void init_shadowstack(void) { struct tx_descriptor *d = thread_descriptor; - d->shadowstack = malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); + d->shadowstack = stm_malloc(sizeof(gcptr) * LENGTH_SHADOW_STACK); if (!d->shadowstack) { stm_fatalerror("out of memory: shadowstack\n"); } @@ -69,7 +69,7 @@ assert(x == END_MARKER_ON); assert(stm_shadowstack == d->shadowstack); stm_shadowstack = NULL; - free(d->shadowstack); + stm_free(d->shadowstack, sizeof(gcptr) * LENGTH_SHADOW_STACK); } void stm_set_max_aborts(int max_aborts) diff --git a/rpython/translator/stm/src_stm/weakref.c b/rpython/translator/stm/src_stm/weakref.c --- a/rpython/translator/stm/src_stm/weakref.c +++ b/rpython/translator/stm/src_stm/weakref.c @@ -28,7 +28,7 @@ */ while (gcptrlist_size(&d->young_weakrefs) > 0) { gcptr weakref = gcptrlist_pop(&d->young_weakrefs); - if (!(weakref->h_tid & GCFLAG_NURSERY_MOVED)) + if (!(weakref->h_tid & GCFLAG_MOVED)) continue; /* the weakref itself dies */ weakref = (gcptr)weakref->h_revision; @@ -37,7 +37,7 @@ assert(pointing_to != NULL); if (stmgc_is_in_nursery(d, pointing_to)) { - if (pointing_to->h_tid & GCFLAG_NURSERY_MOVED) { + if (pointing_to->h_tid & GCFLAG_MOVED) { dprintf(("weakref ptr moved %p->%p\n", WEAKREF_PTR(weakref, size), (gcptr)pointing_to->h_revision)); @@ -69,49 +69,25 @@ static _Bool is_partially_visited(gcptr obj) { - /* Based on gcpage.c:visit(). Check the code here if we simplify - visit(). Returns True or False depending on whether we find any - version of 'obj' to be VISITED or not. + /* Based on gcpage.c:visit_public(). Check the code here if we change + visit_public(). Returns True or False depending on whether we find any + version of 'obj' to be MARKED or not. 
*/ - restart: - if (obj->h_tid & GCFLAG_VISITED) + assert(IMPLIES(obj->h_tid & GCFLAG_VISITED, + obj->h_tid & GCFLAG_MARKED)); + if (obj->h_tid & GCFLAG_MARKED) return 1; - if (obj->h_revision & 1) { - assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - assert(!(obj->h_tid & GCFLAG_STUB)); + if (!(obj->h_tid & GCFLAG_PUBLIC)) return 0; - } - else if (obj->h_tid & GCFLAG_PUBLIC) { - /* h_revision is a ptr: we have a more recent version */ - if (!(obj->h_revision & 2)) { - /* go visit the more recent version */ - obj = (gcptr)obj->h_revision; - } - else { - /* it's a stub */ - assert(obj->h_tid & GCFLAG_STUB); - obj = (gcptr)(obj->h_revision - 2); - } - goto restart; - } - else { - assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - gcptr B = (gcptr)obj->h_revision; - assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY)); - if (B->h_tid & GCFLAG_VISITED) + + if (obj->h_original != 0 && + !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { + gcptr original = (gcptr)obj->h_original; + assert(IMPLIES(original->h_tid & GCFLAG_VISITED, + original->h_tid & GCFLAG_MARKED)); + if (original->h_tid & GCFLAG_MARKED) return 1; - assert(!(obj->h_tid & GCFLAG_STUB)); - assert(!(B->h_tid & GCFLAG_STUB)); - - if (IS_POINTER(B->h_revision)) { - assert(B->h_tid & GCFLAG_PUBLIC); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(B->h_revision & 2)); - - obj = (gcptr)B->h_revision; - goto restart; - } } return 0; } diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -1,5 +1,5 @@ import py -from rpython.rlib import rstm, rgc +from rpython.rlib import rstm, rgc, objectmodel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rclass from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr @@ -276,3 +276,30 @@ t, cbuilder = self.compile(main) data = cbuilder.cmdexec('a b') assert 'li102ee10:hi there 3e\n0\n' in data + + def test_weakref(self): + import weakref + class Foo(object): + pass + + def f(argv): + foo = Foo() + foo.n = argv + w = weakref.ref(foo) + assert w() is foo + objectmodel.keepalive_until_here(foo) + return w + f._dont_inline_ = True + + def main(argv): + w = f(argv) + assert w() is not None + assert len(w().n) == len(argv) + rgc.collect() + assert w() is None + print 'test ok' + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('a b') + assert 'test ok\n' in data From noreply at buildbot.pypy.org Mon Jul 29 11:29:03 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:29:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: adapt to new api Message-ID: <20130729092903.D21481C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65772:17d05e7de409 Date: 2013-07-29 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/17d05e7de409/ Log: adapt to new api diff --git a/rpython/translator/stm/test/targetjit1.py b/rpython/translator/stm/test/targetjit1.py --- a/rpython/translator/stm/test/targetjit1.py +++ b/rpython/translator/stm/test/targetjit1.py @@ -52,17 +52,13 @@ def run(self): try: - rstm.perform_transaction(ThreadRunner.run_really, - ThreadRunner, self) + while self.value < glob.LENGTH: + jitdriver.jit_merge_point(self=self) + glob.node = Node(self.value, glob.node) + self.value += 1 finally: self.finished_lock.release() - def run_really(self, retry_counter): - 
jitdriver.jit_merge_point(self=self) - glob.node = Node(self.value, glob.node) - self.value += 1 - return int(self.value < glob.LENGTH) - jitdriver = jit.JitDriver(greens=[], reds=['self']) # ____________________________________________________________ @@ -84,9 +80,9 @@ bootstrapper.lock = None bootstrapper.args = None - def _freeze_(self): - self.reinit() - return False + # def _freeze_(self): + # self.reinit() + # return False @staticmethod def bootstrap(): From noreply at buildbot.pypy.org Mon Jul 29 11:29:05 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:29:05 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add stm_dont_track_raw_accesses hint for classes; use it for 'aroundstate' Message-ID: <20130729092905.30FB81C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65773:ba7123a9d332 Date: 2013-07-29 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ba7123a9d332/ Log: add stm_dont_track_raw_accesses hint for classes; use it for 'aroundstate' also mark ExcData to not track. this may require clearing it on abort! diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -367,6 +367,7 @@ if hints is None: hints = {} hints = self._check_for_immutable_hints(hints) + hints = self._check_for_stm_hints(hints) kwds = {} if self.gcflavor == 'gc': kwds['rtti'] = True diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -329,6 +329,7 @@ class AroundState: _alloc_flavor_ = "raw" + _stm_dont_track_raw_accesses_ = True def _cleanup_(self): self.before = None # or a regular RPython function diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -46,6 +46,10 @@ class ImmutableConflictError(Exception): """Raised when the _immutable_ or _immutable_fields_ hints are not consistent across a class hierarchy.""" + +class StmHintConflictError(Exception): + """Raised when the _stm_dont_track_raw_accesses_ hints are + not consistent across a class hierarchy.""" def getclassrepr(rtyper, classdef): @@ -215,6 +219,26 @@ hints['immutable_fields'] = accessor return hints + def _check_for_stm_hints(self, hints): + loc = self.classdef.classdesc.lookup('_stm_dont_track_raw_accesses_') + if loc is not None: + if loc is not self.classdef.classdesc: + raise StmHintConflictError( + "class %r inherits from its parent" + " _immutable__stm_dont_track_raw_accesses_=True, " + "so it should also declare" + " _stm_dont_track_raw_accesses_=True" % ( + self.classdef,)) + if loc.classdict.get('_stm_dont_track_raw_accesses_').value is not True: + raise TyperError( + "class %r: _stm_dont_track_raw_accesses_ = something " + "else than True" % ( + self.classdef,)) + hints = hints.copy() + hints['stm_dont_track_raw_accesses'] = True + return hints + + def __repr__(self): if self.classdef is None: clsname = 'object' diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -470,7 +470,8 @@ EXCDATA = lltype.Struct('ExcData', ('exc_type', self.lltype_of_exception_type), ('exc_value', self.lltype_of_exception_value), - hints={'stm_thread_local': True}) + hints={'stm_thread_local': True, + 'stm_dont_track_raw_accesses':True}) 
self.EXCDATA = EXCDATA exc_data = lltype.malloc(EXCDATA, immortal=True) diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -224,4 +224,22 @@ res = self.interpret_inevitable(f1, [True]) assert res is None + def test_raw_class_hint(self): + class A: + _alloc_flavor_ = "raw" + _stm_dont_track_raw_accesses_ = True + def __init__(self): self.x = 1 + def f2(): + return A() + + def f(i): + a = f2() + a.x = i + i = a.x + lltype.free(a, flavor='raw') + return i + + res = self.interpret_inevitable(f, [2]) + assert res == 'free' # not setfield or getfield + From noreply at buildbot.pypy.org Mon Jul 29 11:29:06 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 11:29:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add failing test for repeating a read_barrier after a write_barrier Message-ID: <20130729092906.655B61C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65774:ca6cec2712bf Date: 2013-07-29 11:28 +0200 http://bitbucket.org/pypy/pypy/changeset/ca6cec2712bf/ Log: add failing test for repeating a read_barrier after a write_barrier diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -64,6 +64,26 @@ jump() """, t=NULL) + def test_invalidate_read_status_after_write(self): + self.check_rewrite(""" + [p0] + p1 = same_as(p0) + p2 = same_as(p0) + p4 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p2, p0, descr=tzdescr) + p5 = getfield_gc(p1, descr=tzdescr) + """, """ + [p0] + p1 = same_as(p0) + p2 = same_as(p0) + cond_call_stm_b(p1, descr=P2Rdescr) + p4 = getfield_gc(p1, descr=tzdescr) + cond_call_stm_b(p2, descr=P2Wdescr) + setfield_gc(p2, p0, descr=tzdescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p5 = getfield_gc(p1, descr=tzdescr) + """) + def test_rewrite_write_barrier_after_malloc(self): self.check_rewrite(""" [p1, p3] From noreply at buildbot.pypy.org Mon Jul 29 12:06:07 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 29 Jul 2013 12:06:07 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: an announcement for the demo evening Message-ID: <20130729100607.9977C1C0D35@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r5011:6ab14e85a0c2 Date: 2013-07-29 12:04 +0200 http://bitbucket.org/pypy/extradoc/changeset/6ab14e85a0c2/ Log: an announcement for the demo evening diff --git a/sprintinfo/london-2013/demo-announcement.txt b/sprintinfo/london-2013/demo-announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/london-2013/demo-announcement.txt @@ -0,0 +1,29 @@ +PyPy is a fast Python VM. Maybe you've never used PyPy and want to find out +what use it might be for you? Or you and your organisation have been using it +and you want to find out more about how it works under the hood? If so, this +demo session is for you! + +Members of the PyPy team will give a series of lightning talks on PyPy: its +benefits; how it works; research currently being undertaken to make it +faster; and unusual uses it can be put to. Speakers will be available +afterwards for informal discussions. This is the first time an event like +this has been held in the UK, and is a unique opportunity to speak to core +people. 
Speakers confirmed thus far include: Armin Rigo, `Maciej Fijałkowski`_, +`Carl Friedrich Bolz`_, `Lukas Diekmann`_, `Laurence Tratt`__. + +.. __: http://tratt.net/laurie/ + +.. _`Maciej Fijałkowski`: http://baroquesoftware.com/ +.. _`Carl Friedrich Bolz`: http://cfbolz.de +.. _`Lukas Diekmann`: http://lukasdiekmann.com/ + +The venue for this talk is the `Software Development Team`_, King's College +London. The main entrance is on the Strand, from where the room for the event +will be clearly signposted. Travel directions can be found at +http://www.kcl.ac.uk/campuslife/campuses/directions/strand.aspx + +.. _`Software Development Team`: http://soft-dev.org/ + +If you have any questions about the event, please contact `Laurence Tratt`_ + +.. _`Laurence Tratt`: mailto:laurie at tratt.net From noreply at buildbot.pypy.org Mon Jul 29 12:40:19 2013 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 29 Jul 2013 12:40:19 +0200 (CEST) Subject: [pypy-commit] pypy default: ups fix Message-ID: <20130729104019.3ADBE1C00D8@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65775:ee83b070300a Date: 2013-07-29 12:39 +0200 http://bitbucket.org/pypy/pypy/changeset/ee83b070300a/ Log: ups fix diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py --- a/pypy/tool/jitlogparser/parser.py +++ b/pypy/tool/jitlogparser/parser.py @@ -396,7 +396,7 @@ comm = loop.comment comm = comm.lower() if comm.startswith('# bridge'): - m = re.search('guard (-?[\da-f]+)', comm) + m = re.search('guard 0x(-?[\da-f]+)', comm) name = 'guard ' + m.group(1) elif "(" in comm: name = comm[2:comm.find('(')-1] @@ -460,4 +460,4 @@ if __name__ == '__main__': import_log(sys.argv[1]) - + From noreply at buildbot.pypy.org Mon Jul 29 13:26:09 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 13:26:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add myself (London 2013). Message-ID: <20130729112609.992C51C014D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5012:8ac2a032fde2 Date: 2013-07-29 13:24 +0200 http://bitbucket.org/pypy/extradoc/changeset/8ac2a032fde2/ Log: Add myself (London 2013). diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -20,6 +20,7 @@ Remi Meier 24/8-1/9 ? Marko Bencun 24/8-1/9 ? Maciej Fijalkowski 25/8-1/9 private +Manuel Jacob ? sth. cheap, pref. share ==================== ============== ======================= From noreply at buildbot.pypy.org Mon Jul 29 14:55:35 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 14:55:35 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Add auto-convertion method descr_contains() in W_BytesObject. Message-ID: <20130729125535.256181C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65777:fcbbf370c6ac Date: 2013-07-29 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/fcbbf370c6ac/ Log: Add auto-convertion method descr_contains() in W_BytesObject. 
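
A hedged sketch of the behaviour this changeset implements (it mirrors the new test in the diff below; CPython 2.7 behaves the same way, since the bytes haystack is ASCII-decoded before searching for a unicode needle):

    # Python 2.x sketch of mixed str/unicode containment checks
    assert u'a' in 'abc'            # unicode needle, bytes haystack: decode haystack, then search
    assert 'a' in u'abc'            # bytes needle, unicode haystack
    try:
        u'\xe2' in 'g\xe2teau'      # haystack cannot be decoded as ASCII
    except UnicodeDecodeError:
        pass
    else:
        raise AssertionError('expected UnicodeDecodeError')
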
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -204,6 +204,12 @@ return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) return StringMethods._endswith(self, space, value, w_suffix, start, end) + def descr_contains(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_unicode = unicode_from_encoded_object(space, self, None, None) + return space.newbool(self_as_unicode._value.find(self._op_val(space, w_sub)) >= 0) + return StringMethods.descr_contains(self, space, w_sub) + @unwrap_spec(count=int) def descr_replace(self, space, w_old, w_new, count=-1): old_is_unicode = space.isinstance_w(w_old, space.w_unicode) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -86,6 +86,7 @@ def test_contains(self): assert u'a' in 'abc' assert 'a' in u'abc' + raises(UnicodeDecodeError, "u'\xe2' in 'g\xe2teau'") def test_splitlines(self): assert u''.splitlines() == [] From noreply at buildbot.pypy.org Mon Jul 29 14:55:33 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 14:55:33 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Test and fix to make bytearray.partition() return a new object. Message-ID: <20130729125533.DE3881C014D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65776:21cef1d9c01c Date: 2013-07-29 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/21cef1d9c01c/ Log: Test and fix to make bytearray.partition() return a new object. diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -484,6 +484,9 @@ space.wrap("empty separator")) pos = value.find(sub) if pos == -1: + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if isinstance(self, W_BytearrayObject): + self = self._new(value) return space.newtuple([self, self._empty(), self._empty()]) else: from pypy.objspace.std.bytearrayobject import W_BytearrayObject diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -462,3 +462,7 @@ for i in range(count): b[i:i+1] = 'y' assert str(b) == 'y' * count + + def test_partition_return_copy(self): + b = bytearray(b'foo') + assert b.partition(b'x')[0] is not b From noreply at buildbot.pypy.org Mon Jul 29 14:55:36 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 14:55:36 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix app-level unicode's istitle() method. Message-ID: <20130729125536.A6CB01C014D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65778:76dd0077753a Date: 2013-07-29 14:52 +0200 http://bitbucket.org/pypy/pypy/changeset/76dd0077753a/ Log: Fix app-level unicode's istitle() method. 
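
For illustration, a minimal sketch of the istitle() behaviour being fixed, assembled from the new and pre-existing tests quoted below (CPython 2.7 agrees): uppercase letters may start a titlecased word, and so may true titlecase code points such as U+1FFC.

    # Python 2.x: istitle() accepts uppercase *or* titlecase characters as word starts
    assert u'Brown Fox'.istitle()
    assert u'\u1FFc'.istitle()                      # a single titlecase character
    assert u'Greek \u1FFcitlecases ...'.istitle()
    assert not u'!Brow&&&&n Fox'.istitle()          # lowercase letter after uncased chars
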
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -357,7 +357,7 @@ for pos in range(0, len(input)): ch = input[pos] - if self._isupper(ch): + if self._istitle(ch): if previous_is_cased: return space.w_False previous_is_cased = True diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -215,7 +215,9 @@ assert u"!Brown Fox".istitle() == True assert u"Brow&&&&N Fox".istitle() == True assert u"!Brow&&&&n Fox".istitle() == False - + assert u'\u1FFc'.istitle() + assert u'Greek \u1FFcitlecases ...'.istitle() + def test_capitalize(self): assert u"brown fox".capitalize() == u"Brown fox" assert u' hello '.capitalize() == u' hello ' diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -105,7 +105,7 @@ return unicodedb.isnumeric(ord(ch)) def _istitle(self, ch): - return unicodedb.istitle(ord(ch)) + return unicodedb.isupper(ord(ch)) or unicodedb.istitle(ord(ch)) def _isspace(self, ch): return unicodedb.isspace(ord(ch)) From noreply at buildbot.pypy.org Mon Jul 29 14:58:07 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 14:58:07 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130729125807.AA2BF1C014D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65779:01ac7f86b2ed Date: 2013-07-29 14:55 +0200 http://bitbucket.org/pypy/pypy/changeset/01ac7f86b2ed/ Log: hg merge default diff too long, truncating to 2000 out of 48429 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ 
-1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. 
- - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. 
- - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. 
- -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. 
This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. 
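For instance, importing a namespace looks just like importing an ordinary
Python module (a minimal illustration only; the exact output may differ,
and a fuller interactive session is shown below)::

    >>>> import System.Collections
    >>>> obj = System.Collections.ArrayList()
    >>>> obj.Add(42)
    0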
- -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. - -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. 
The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. 
_JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). 
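To make the last point concrete, here is a plain Python illustration (not
RPython, and independent of any backend) of the identity gap that a
reference-type implementation of ``Pair`` would introduce::

    class PairRef(object):
        # a reference-type stand-in for the hypothetical Pair
        def __init__(self, value, behaviour):
            self.value = value
            self.behaviour = behaviour

    p1 = PairRef(42, None)
    p2 = PairRef(42, None)
    # same state, which at the RPython level should count as "the same"
    # Pair, but two distinct identities at the platform level:
    assert p1 is not p2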
- -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. 
In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. 
What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. 
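As a rough sketch of the kind of information such a description has to
carry; the names below are invented for this illustration and are not the
actual NativeInstance API::

    class ExternalClassDesc(object):
        # minimal stand-in for a backend-level class description
        def __init__(self, namespace, name, methods):
            self.namespace = namespace   # e.g. 'System.Collections'
            self.name = name             # e.g. 'ArrayList'
            # method name -> list of (argument types, result type) overloads
            self.methods = methods

    ArrayListDesc = ExternalClassDesc(
        'System.Collections', 'ArrayList',
        {'Add': [(('Object',), 'Int32')]})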
- -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. - Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. 
**This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. - - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. 
- -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. - -As usual, it is possible to convert an object from type to type only -under certain conditions; there is a number of predefined conversions -between primitive types such as from ``Bool`` to ``Signed`` or from -``Signed`` to ``Float``. For each one of these conversions there is a -corresponding low level operation, such as ``cast_bool_to_int`` and -``cast_int_to_float``. - -Moreover it is possible to cast instances of a class up and down the -inheritance hierarchy with the ``ooupcast`` and ``oodowncast`` low -level operations. Implicit upcasting is not allowed, so you really -need to do a ``ooupcast`` for converting from a subclass to a -superclass. - -With this design statically typed backends can trivially insert -appropriate casts when needed, while dynamically typed backends can -simply ignore some of the operation such as ``ooupcast`` and -``oodowncast``. Backends that supports implicit upcasting, such as CLI -and Java, can simply ignore only ``ooupcast``. - -Object model -++++++++++++ - -The object model implemented by ootype is quite Java-like. The -following is a list of key features of the ootype object model which -have a direct correspondence in the Java or .NET object model: - - - classes have a static set of strongly typed methods and - attributes; - - - methods can be overriden in subclasses; every method is "virtual" - (i.e., can be overridden); methods can be "abstract" (i.e., need - to be overridden in subclasses); - - - classes support single inheritance; all classes inherit directly - or indirectly from the ROOT class; - - - there is some support for method overloading. 
This feature is not - used by the RTyper itself because RPython doesn't support method - overloading, but it is used by the GenCLI backend for offering - access to the native .NET libraries (see XXX); - - - all classes, attributes and methods are public: ootype is only - used internally by the translator, so there is no need to enforce - accessibility rules; - - - classes and functions are first-class order objects: this feature - can be easily simulated by backends for platforms on which it is not - a native feature; - - - there is a set of `built-in types`_ offering standard features. - -Exception handling -++++++++++++++++++ - -Since flow graphs are meant to be used also for very low level -backends such as C, they are quite unstructured: this means that the -target platform doesn't need to have a native exception handling -mechanism, since at the very least the backend can handle exceptions -just like ``genc`` does. - -By contrast we know that most of high level platforms natively support -exception handling, so *ootypesystem* is designed to let them to use -it. In particular the exception instances are typed with the -``Instance`` type, so the usual inheritance exception hierarchy is -preserved and the native way to catch exception should just work. - -.. `built-in types`_ - -Built-in types -++++++++++++++ - -It seems reasonable to assume high level platforms to provide built-in -facilities for common types such as *lists* or *hashtables*. - -RPython standard types such as ``List`` and ``Dict`` are implemented -on top of these common types; at the moment of writing there are six -built-in types: - - - **String**: self-descriptive - - - **StringBuilder**: used for dynamic building of string - - - **List**: a variable-sized, homogeneous list of object - - - **Dict**: a hashtable of homogeneous keys and values - - - **CustomDict**: same as dict, but with custom equal and hash - functions - - - **DictItemsIterator**: a helper class for iterating over the - elements of a ``Dict`` - - -Each of these types is a subtype of ``BuiltinADTType`` and has set of -ADT (Abstract Data Type) methods (hence the name of the base class) -for being manipulated. Examples of ADT methods are ``ll_length`` for -``List`` and ``ll_get`` for ``Dict``. - -From the backend point of view an instance of a built-in types is -treated exactly as a plain ``Instance``, so usually no special-casing -is needed. The backend is supposed to provide a bunch of classes -wrapping the native ones in order to provide the right signature and -semantic for the ADT methods. - -As an alternative, backends can special-case the ADT types to map them -directly to the native equivalent, translating the method names -on-the-fly at compile time. - -Generics -++++++++ - -Some target platforms offer native support for **generics**, i.e. -classes that can be parametrized on types, not only values. For -example, if one wanted to create a list using generics, a possible -declaration would be to say ``List``, where ``T`` represented the -type. When instantiated, one could create ``List`` or -``List``. The list is then treated as a list of whichever type -is specified. 
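(Spelled out with the angle-bracket syntax of C# and Java, the declaration
above is ``List<T>``, instantiated for example as ``List<int>`` or
``List<string>``.)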
- -Each subclass of ``BuiltinADTTypes`` defines a bunch of type -parameters by creating some class level placeholder in the form of -``PARAMNAME_T``; then it fills up the ``_GENERIC_METHODS`` attribute -by defining the signature of each of the ADT methods using those From noreply at buildbot.pypy.org Mon Jul 29 15:00:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 15:00:08 +0200 (CEST) Subject: [pypy-commit] pypy default: A random test for COND_CALL. It can fail so far. Message-ID: <20130729130008.8DADC1C014D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65780:0a06c71baefa Date: 2013-07-29 14:58 +0200 http://bitbucket.org/pypy/pypy/changeset/0a06c71baefa/ Log: A random test for COND_CALL. It can fail so far. diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -502,6 +502,7 @@ # 3. raising call and wrong guard_exception # 4. raising call and guard_no_exception # 5. non raising call and guard_exception +# (6. test of a cond_call, always non-raising and guard_no_exception) class BaseCallOperation(test_random.AbstractOperation): def non_raising_func_code(self, builder, r): @@ -648,6 +649,34 @@ builder.guard_op = op builder.loop.operations.append(op) +# 6. a conditional call (for now always with no exception raised) +class CondCallOperation(BaseCallOperation): + def produce_into(self, builder, r): + fail_subset = builder.subset_of_intvars(r) + v_cond = builder.get_bool_var(r) + subset = builder.subset_of_intvars(r)[:4] + for i in range(len(subset)): + if r.random() < 0.35: + subset[i] = ConstInt(r.random_integer()) + # + seen = [] + def call_me(*args): + if len(seen) == 0: + seen.append(args) + else: + assert seen[0] == args + # + TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void) + ptr = llhelper(lltype.Ptr(TP), call_me) + c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) + args = [v_cond, c_addr] + subset + descr = self.getcalldescr(builder, TP) + self.put(builder, args, descr) + op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, + descr=builder.getfaildescr()) + op.setfailargs(fail_subset) + builder.loop.operations.append(op) + # ____________________________________________________________ OPERATIONS = test_random.OPERATIONS[:] @@ -684,6 +713,7 @@ OPERATIONS.append(RaisingCallOperationGuardNoException(rop.CALL)) OPERATIONS.append(RaisingCallOperationWrongGuardException(rop.CALL)) OPERATIONS.append(CallOperationException(rop.CALL)) + OPERATIONS.append(CondCallOperation(rop.COND_CALL)) OPERATIONS.append(GuardNonNullClassOperation(rop.GUARD_NONNULL_CLASS)) LLtypeOperationBuilder.OPERATIONS = OPERATIONS From noreply at buildbot.pypy.org Mon Jul 29 15:00:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 15:00:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the test and generate more efficient code. Message-ID: <20130729130009.C4F3B1C014D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65781:caa2340430f2 Date: 2013-07-29 14:59 +0200 http://bitbucket.org/pypy/pypy/changeset/caa2340430f2/ Log: Fix the test and generate more efficient code. 
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -14,7 +14,7 @@ from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - gpr_reg_mgr_cls, xmm_reg_mgr_cls, _register_arguments) + gpr_reg_mgr_cls, xmm_reg_mgr_cls) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, @@ -154,7 +154,11 @@ come. """ mc = codebuf.MachineCodeBlockWrapper() - self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) + # copy registers to the frame, with the exception of the + # 'cond_call_register_arguments' and eax, because these have already + # been saved by the caller + self._push_all_regs_to_frame(mc, cond_call_register_arguments + [eax], + supports_floats, callee_only) if IS_X86_64: mc.SUB(esp, imm(WORD)) self.set_extra_stack_depth(mc, 2 * WORD) @@ -164,7 +168,7 @@ mc.SUB(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 8 * WORD) for i in range(4): - mc.MOV_sr(i * WORD, _register_arguments[i].value) + mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) mc.CALL(eax) if IS_X86_64: mc.ADD(esp, imm(WORD)) @@ -172,8 +176,7 @@ mc.ADD(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) - self._pop_all_regs_from_frame(mc, [], supports_floats, - callee_only) + self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) @@ -1755,7 +1758,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_br(v * WORD + base_ofs, gpr.value) @@ -1777,7 +1780,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_rb(gpr.value, v * WORD + base_ofs) @@ -2161,11 +2164,29 @@ def label(self): self._check_frame_depth_debug(self.mc) - def cond_call(self, op, gcmap, cond_loc, call_loc): - self.mc.TEST(cond_loc, cond_loc) + def cond_call(self, op, gcmap, loc_cond, imm_func, arglocs): + self.mc.TEST(loc_cond, loc_cond) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() + # self.push_gcmap(self.mc, gcmap, store=True) + # + # first save away the 4 registers from 'cond_call_register_arguments' + # plus the register 'eax' + base_ofs = self.cpu.get_baseofs_of_frame_field() + for gpr in cond_call_register_arguments + [eax]: + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + self.mc.MOV_br(v * WORD + base_ofs, gpr.value) + # + # load the 0-to-4 arguments into these registers + from rpython.jit.backend.x86.jump import remap_frame_layout + remap_frame_layout(self, arglocs, + cond_call_register_arguments[:len(arglocs)], eax) + # + # load the constant address of the function to call into eax + self.mc.MOV(eax, imm_func) + # + # figure out which variant of cond_call_slowpath to call, and call it callee_only = False floats = False if self._regalloc is not None: @@ -2348,5 +2369,7 @@ os.write(2, '[x86/asm] %s\n' % msg) raise NotImplementedError(msg) +cond_call_register_arguments = [edi, 
esi, edx, ecx] + class BridgeAlreadyCompiled(Exception): pass diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -119,8 +119,6 @@ for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs): gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i -_register_arguments = [edi, esi, edx, ecx] - class RegAlloc(BaseRegalloc): @@ -803,21 +801,15 @@ def consider_cond_call(self, op): assert op.result is None args = op.getarglist() - assert 2 <= len(args) <= 4 + 2 - tmpbox = TempBox() - self.rm.force_allocate_reg(tmpbox, selected_reg=eax) + assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments + loc_cond = self.make_sure_var_in_reg(args[0], args) v = args[1] assert isinstance(v, Const) - imm = self.rm.convert_to_imm(v) - self.assembler.regalloc_mov(imm, eax) - args_so_far = [tmpbox] - for i in range(2, len(args)): - reg = _register_arguments[i - 2] - self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) - args_so_far.append(args[i]) - loc_cond = self.make_sure_var_in_reg(args[0], args) - self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) - self.rm.possibly_free_var(tmpbox) + imm_func = self.rm.convert_to_imm(v) + arglocs = [self.loc(args[i]) for i in range(2, len(args))] + gcmap = self.get_gcmap() + self.rm.possibly_free_var(args[0]) + self.assembler.cond_call(op, gcmap, loc_cond, imm_func, arglocs) def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) From noreply at buildbot.pypy.org Mon Jul 29 15:15:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 15:15:53 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20130729131553.6C0E91C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65782:ee82feb0142b Date: 2013-07-29 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/ee82feb0142b/ Log: fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2181,7 +2181,8 @@ # load the 0-to-4 arguments into these registers from rpython.jit.backend.x86.jump import remap_frame_layout remap_frame_layout(self, arglocs, - cond_call_register_arguments[:len(arglocs)], eax) + cond_call_register_arguments[:len(arglocs)], + X86_64_SCRATCH_REG if IS_X86_64 else None) # # load the constant address of the function to call into eax self.mc.MOV(eax, imm_func) From noreply at buildbot.pypy.org Mon Jul 29 15:23:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 15:23:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Only save registers that really contain something Message-ID: <20130729132351.006051C014D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65783:80e614cc3039 Date: 2013-07-29 15:22 +0200 http://bitbucket.org/pypy/pypy/changeset/80e614cc3039/ Log: Only save registers that really contain something diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2174,7 +2174,10 @@ # first save away the 4 registers from 'cond_call_register_arguments' # plus the register 'eax' base_ofs = self.cpu.get_baseofs_of_frame_field() + should_be_saved = self._regalloc.rm.reg_bindings.values() for gpr in cond_call_register_arguments + [eax]: + if gpr not in should_be_saved: + continue v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] 
self.mc.MOV_br(v * WORD + base_ofs, gpr.value) # From noreply at buildbot.pypy.org Mon Jul 29 16:08:03 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 16:08:03 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove these imports. Message-ID: <20130729140803.2D0681C136D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65785:7fc0e8b104b0 Date: 2013-07-29 15:33 +0200 http://bitbucket.org/pypy/pypy/changeset/7fc0e8b104b0/ Log: Remove these imports. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,11 +1,9 @@ """The builtin bytearray implementation""" -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature -from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef From noreply at buildbot.pypy.org Mon Jul 29 16:08:05 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 16:08:05 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix translation. Message-ID: <20130729140805.AC0E81C14BA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65787:8f3122422344 Date: 2013-07-29 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/8f3122422344/ Log: Fix translation. diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -58,7 +58,7 @@ return ch.islower() def _istitle(self, ch): - return ch.istitle() + return ch.isupper() def _isspace(self, ch): return ch.isspace() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -101,7 +101,7 @@ return ch.islower() def _istitle(self, ch): - return ch.istitle() + return ch.isupper() def _isspace(self, ch): return ch.isspace() From noreply at buildbot.pypy.org Mon Jul 29 16:08:04 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 16:08:04 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove string types from multi-method table. Message-ID: <20130729140804.666461C138E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65786:808f2aea0f13 Date: 2013-07-29 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/808f2aea0f13/ Log: Remove string types from multi-method table. 
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -2,9 +2,9 @@ from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.objspace.std.bytesobject import str_typedef +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.unicodeobject import unicode_typedef, unicode_from_object +from pypy.objspace.std.unicodeobject import W_UnicodeObject, unicode_from_object from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT @@ -682,12 +682,12 @@ __module__ = "numpypy", ) -W_StringBox.typedef = TypeDef("string_", (str_typedef, W_CharacterBox.typedef), +W_StringBox.typedef = TypeDef("string_", (W_BytesObject.typedef, W_CharacterBox.typedef), __module__ = "numpypy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (unicode_typedef, W_CharacterBox.typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_UnicodeObject.typedef, W_CharacterBox.typedef), __module__ = "numpypy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), ) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,10 +1,10 @@ """The builtin bytearray implementation""" +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature -from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods @@ -16,7 +16,7 @@ def _make_data(s): return [s[i] for i in range(len(s))] -class W_BytearrayObject(W_Object, StringMethods): +class W_BytearrayObject(W_Root, StringMethods): def __init__(w_self, data): w_self.data = data @@ -378,7 +378,7 @@ # ____________________________________________________________ -bytearray_typedef = W_BytearrayObject.typedef = StdTypeDef( +W_BytearrayObject.typedef = StdTypeDef( "bytearray", __doc__ = '''bytearray() -> an empty bytearray bytearray(sequence) -> bytearray initialized from sequence\'s items @@ -460,7 +460,6 @@ remove = interp2app(W_BytearrayObject.descr_remove), reverse = interp2app(W_BytearrayObject.descr_reverse), ) -registerimplementation(W_BytearrayObject) init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,12 +1,12 @@ """The builtin str implementation""" +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat from 
pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.unicodeobject import (unicode_from_string, @@ -16,7 +16,7 @@ from rpython.rlib.rstring import StringBuilder, replace -class W_AbstractBytesObject(W_Object): +class W_AbstractBytesObject(W_Root): __slots__ = () def is_w(self, space, w_other): @@ -265,8 +265,6 @@ # listview_str return [s for s in value] -registerimplementation(W_BytesObject) - W_BytesObject.EMPTY = W_BytesObject('') W_BytesObject.PREBUILT = [W_BytesObject(chr(i)) for i in range(256)] del i @@ -294,7 +292,7 @@ else: return W_BytesObject(c) -str_typedef = W_BytesObject.typedef = StdTypeDef( +W_BytesObject.typedef = StdTypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), __doc__ = '''str(object) -> string diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -36,13 +36,9 @@ from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.complextype import complex_typedef - from pypy.objspace.std.basestringtype import basestring_typedef - from pypy.objspace.std.bytesobject import str_typedef - from pypy.objspace.std.bytearrayobject import bytearray_typedef from pypy.objspace.std.typeobject import type_typedef from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.longtype import long_typedef - from pypy.objspace.std.unicodeobject import unicode_typedef from pypy.objspace.std.nonetype import none_typedef self.pythontypes = [value for key, value in result.__dict__.items() if not key.startswith('_')] # don't look @@ -59,6 +55,7 @@ from pypy.objspace.std import listobject from pypy.objspace.std import dictmultiobject from pypy.objspace.std import setobject + from pypy.objspace.std import basestringtype from pypy.objspace.std import bytesobject from pypy.objspace.std import bytearrayobject from pypy.objspace.std import typeobject @@ -81,6 +78,10 @@ self.pythontypes.append(setobject.W_SetObject.typedef) self.pythontypes.append(setobject.W_FrozensetObject.typedef) self.pythontypes.append(iterobject.W_AbstractSeqIterObject.typedef) + self.pythontypes.append(basestringtype.basestring_typedef) + self.pythontypes.append(bytesobject.W_BytesObject.typedef) + self.pythontypes.append(bytearrayobject.W_BytearrayObject.typedef) + self.pythontypes.append(unicodeobject.W_UnicodeObject.typedef) # the set of implementation types self.typeorder = { @@ -88,14 +89,11 @@ boolobject.W_BoolObject: [], intobject.W_IntObject: [], floatobject.W_FloatObject: [], - bytesobject.W_BytesObject: [], - bytearrayobject.W_BytearrayObject: [], typeobject.W_TypeObject: [], sliceobject.W_SliceObject: [], longobject.W_LongObject: [], noneobject.W_NoneObject: [], complexobject.W_ComplexObject: [], - unicodeobject.W_UnicodeObject: [], pypy.interpreter.pycode.PyCode: [], pypy.interpreter.special.Ellipsis: [], } diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -1,12 +1,8 @@ -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all from pypy.objspace.std.bytesobject import 
W_AbstractBytesObject, W_BytesObject from rpython.rlib.rstring import StringBuilder from pypy.interpreter.buffer import Buffer class W_StringBufferObject(W_AbstractBytesObject): - from pypy.objspace.std.bytesobject import str_typedef as typedef - w_str = None def __init__(self, builder): @@ -34,7 +30,7 @@ def str_w(self, space): return self.force() -registerimplementation(W_StringBufferObject) +W_StringBufferObject.typedef = W_BytesObject.typedef # ____________________________________________________________ @@ -64,4 +60,3 @@ return w_self from pypy.objspace.std import bytesobject -register_all(vars(), bytesobject) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1,13 +1,13 @@ """The builtin unicode implementation""" from pypy.interpreter import unicodehelper +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.model import W_Object, registerimplementation from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from rpython.rlib.objectmodel import compute_hash, compute_unique_id @@ -20,7 +20,7 @@ 'unicode_from_string', 'unicode_to_decimal_w'] -class W_UnicodeObject(W_Object, StringMethods): +class W_UnicodeObject(W_Root, StringMethods): _immutable_fields_ = ['_value'] def __init__(w_self, unistr): @@ -404,7 +404,7 @@ # ____________________________________________________________ -unicode_typedef = W_UnicodeObject.typedef = StdTypeDef( +W_UnicodeObject.typedef = StdTypeDef( "unicode", basestring_typedef, __new__ = interp2app(descr_new_), __doc__ = '''unicode(string [, encoding[, errors]]) -> object @@ -483,8 +483,6 @@ interp2app(W_UnicodeObject.descr_formatter_field_name_split), ) -unitypedef = unicode_typedef - def _create_list_from_unicode(value): # need this helper function to allow the jit to look inside and inline @@ -494,8 +492,6 @@ W_UnicodeObject.EMPTY = W_UnicodeObject(u'') -registerimplementation(W_UnicodeObject) - # Helper for converting int/long def unicode_to_decimal_w(space, w_unistr): if not isinstance(w_unistr, W_UnicodeObject): From noreply at buildbot.pypy.org Mon Jul 29 16:08:01 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 16:08:01 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Remove dead code in bytearrayobject.py. Message-ID: <20130729140801.F1A7E1C1190@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65784:e783a620e81a Date: 2013-07-29 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/e783a620e81a/ Log: Remove dead code in bytearrayobject.py. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,20 +1,15 @@ """The builtin bytearray implementation""" -from pypy.interpreter.baseobjspace import ObjSpace, W_Root +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature -from pypy.objspace.std import bytesobject -from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.inttype import wrapint from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM +from pypy.objspace.std.sliceobject import W_SliceObject +from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index from rpython.rlib.objectmodel import newlist_hint, resizelist_hint from rpython.rlib.rstring import StringBuilder @@ -267,7 +262,7 @@ def descr_remove(self, space, w_char): char = space.int_w(space.index(w_char)) try: - result = self.data.remove(chr(char)) + self.data.remove(chr(char)) except ValueError: raise OperationError(space.w_ValueError, space.wrap( "value not found in bytearray")) @@ -275,42 +270,6 @@ def descr_reverse(self, space): self.data.reverse() - -bytearray_append = SMM('append', 2) -bytearray_extend = SMM('extend', 2) -bytearray_insert = SMM('insert', 3, - doc="B.insert(index, int) -> None\n\n" - "Insert a single item into the bytearray before " - "the given index.") - -bytearray_pop = SMM('pop', 2, defaults=(-1,), - doc="B.pop([index]) -> int\n\nRemove and return a " - "single item from B. 
If no index\nargument is given, " - "will pop the last value.") - -bytearray_remove = SMM('remove', 2, - doc="B.remove(int) -> None\n\n" - "Remove the first occurance of a value in B.") - -bytearray_reverse = SMM('reverse', 1, - doc="B.reverse() -> None\n\n" - "Reverse the order of the values in B in place.") - -bytearray_strip = SMM('strip', 2, defaults=(None,), - doc="B.strip([bytes]) -> bytearray\n\nStrip leading " - "and trailing bytes contained in the argument.\nIf " - "the argument is omitted, strip ASCII whitespace.") - -bytearray_lstrip = SMM('lstrip', 2, defaults=(None,), - doc="B.lstrip([bytes]) -> bytearray\n\nStrip leading " - "bytes contained in the argument.\nIf the argument is " - "omitted, strip leading ASCII whitespace.") - -bytearray_rstrip = SMM('rstrip', 2, defaults=(None,), - doc="'B.rstrip([bytes]) -> bytearray\n\nStrip trailing " - "bytes contained in the argument.\nIf the argument is " - "omitted, strip trailing ASCII whitespace.") - def getbytevalue(space, w_value): if space.isinstance_w(w_value, space.w_str): string = space.str_w(w_value) @@ -508,191 +467,8 @@ init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] -def len__Bytearray(space, w_bytearray): - result = len(w_bytearray.data) - return wrapint(space, result) -def ord__Bytearray(space, w_bytearray): - if len(w_bytearray.data) != 1: - raise OperationError(space.w_TypeError, - space.wrap("expected a character, but string" - "of length %s found" % len(w_bytearray.data))) - return space.wrap(ord(w_bytearray.data[0])) - -def getitem__Bytearray_ANY(space, w_bytearray, w_index): - # getindex_w should get a second argument space.w_IndexError, - # but that doesn't exist the first time this is called. - try: - w_IndexError = space.w_IndexError - except AttributeError: - w_IndexError = None - index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: - return space.newint(ord(w_bytearray.data[index])) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def getitem__Bytearray_Slice(space, w_bytearray, w_slice): - data = w_bytearray.data - length = len(data) - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - newdata = data[start:stop] - else: - newdata = _getitem_slice_multistep(data, start, step, slicelength) - return W_BytearrayObject(newdata) - -def _getitem_slice_multistep(data, start, step, slicelength): - return [data[start + i*step] for i in range(slicelength)] - -def contains__Bytearray_Int(space, w_bytearray, w_char): - char = space.int_w(w_char) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in w_bytearray.data: - if ord(c) == char: - return space.w_True - return space.w_False - -def contains__Bytearray_String(space, w_bytearray, w_str): - # XXX slow - copies, needs rewriting - w_str2 = str__Bytearray(space, w_bytearray) - return bytesobject.contains__String_String(space, w_str2, w_str) - -def contains__Bytearray_ANY(space, w_bytearray, w_sub): - # XXX slow - copies, needs rewriting - w_str = space.wrap(space.bufferstr_new_w(w_sub)) - w_str2 = str__Bytearray(space, w_bytearray) - return bytesobject.contains__String_String(space, w_str2, w_str) - -def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - return W_BytearrayObject(data1 + data2) - -def 
add__Bytearray_ANY(space, w_bytearray1, w_other): - data1 = w_bytearray1.data - data2 = [c for c in space.bufferstr_new_w(w_other)] - return W_BytearrayObject(data1 + data2) - -def add__String_Bytearray(space, w_str, w_bytearray): - data2 = w_bytearray.data - data1 = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data1 + data2) - -def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False - return space.w_True - -def eq__Bytearray_String(space, w_bytearray, w_other): - return space.eq(str__Bytearray(space, w_bytearray), w_other) - -def eq__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_False - -def eq__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_False - -def ne__Bytearray_String(space, w_bytearray, w_other): - return space.ne(str__Bytearray(space, w_bytearray), w_other) - -def ne__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_True - -def ne__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] < data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) < len(data2)) - -def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] > data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) > len(data2)) - -def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_count__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_index__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rindex__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_rindex__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_find__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_find__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rfind__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_rfind__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_startswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_prefix, w_start, w_stop): - if 
space.isinstance_w(w_prefix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_prefix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_prefix)]) - return bytesobject.str_startswith__String_ANY_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - - w_prefix = space.wrap(space.bufferstr_new_w(w_prefix)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_startswith__String_String_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - -def str_endswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_suffix, w_start, w_stop): - if space.isinstance_w(w_suffix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_suffix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_suffix)]) - return bytesobject.str_endswith__String_ANY_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - w_suffix = space.wrap(space.bufferstr_new_w(w_suffix)) - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_endswith__String_String_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - +# XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: @@ -711,58 +487,8 @@ newdata.extend([c for c in space.bufferstr_new_w(w_s)]) return W_BytearrayObject(newdata) -def str_decode__Bytearray_ANY_ANY(space, w_bytearray, w_encoding, w_errors): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_decode__String_ANY_ANY(space, w_str, w_encoding, w_errors) - -def str_islower__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_islower__String(space, w_str) - -def str_isupper__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_isupper__String(space, w_str) - -def str_isalpha__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_isalpha__String(space, w_str) - -def str_isalnum__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_isalnum__String(space, w_str) - -def str_isdigit__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_isdigit__String(space, w_str) - -def str_istitle__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_istitle__String(space, w_str) - -def str_isspace__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return bytesobject.str_isspace__String(space, w_str) - _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -def bytearray_strip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 1) - -def bytearray_strip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 1) - -def bytearray_lstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 0) - -def bytearray_lstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 0) - -def bytearray_rstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 0, 1) - -def bytearray_rstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 0, 1) - #XXX share the code again with the 
stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength==0: @@ -841,27 +567,6 @@ items[start] = sequence2[i] start += step -def _strip(space, w_bytearray, u_chars, left, right): - # note: mostly copied from bytesobject._strip - # should really be shared - u_self = w_bytearray.data - - lpos = 0 - rpos = len(u_self) - - if left: - while lpos < rpos and u_self[lpos] in u_chars: - lpos += 1 - - if right: - while rpos > lpos and u_self[rpos - 1] in u_chars: - rpos -= 1 - assert rpos >= 0 - - return new_bytearray(space, space.w_bytearray, u_self[lpos:rpos]) - -# __________________________________________________________ -# Buffer interface class BytearrayBuffer(RWBuffer): def __init__(self, data): @@ -875,7 +580,3 @@ def setitem(self, index, char): self.data[index] = char - -def buffer__Bytearray(space, self): - b = BytearrayBuffer(self.data) - return space.wrap(b) From noreply at buildbot.pypy.org Mon Jul 29 16:16:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 16:16:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the comments (only). Message-ID: <20130729141610.4E0091C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65788:de8a5af76f3a Date: 2013-07-29 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/de8a5af76f3a/ Log: Fix the comments (only). diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -156,17 +156,20 @@ mc = codebuf.MachineCodeBlockWrapper() # copy registers to the frame, with the exception of the # 'cond_call_register_arguments' and eax, because these have already - # been saved by the caller + # been saved by the caller. Note that this is not symmetrical: + # these 5 registers are saved by the caller but restored here at + # the end of this function. self._push_all_regs_to_frame(mc, cond_call_register_arguments + [eax], supports_floats, callee_only) if IS_X86_64: - mc.SUB(esp, imm(WORD)) + mc.SUB(esp, imm(WORD)) # alignment self.set_extra_stack_depth(mc, 2 * WORD) + # the arguments are already in the correct registers else: - # we want space for 3 arguments + call + alignment - # the caller is responsible for putting arguments in the right spot + # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 8 * WORD) + # store the arguments at the correct place in the stack for i in range(4): mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) mc.CALL(eax) From noreply at buildbot.pypy.org Mon Jul 29 16:32:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 16:32:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Move pop_gcmap() into the helper. Message-ID: <20130729143229.E13B81C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65789:285d5480fe3c Date: 2013-07-29 16:31 +0200 http://bitbucket.org/pypy/pypy/changeset/285d5480fe3c/ Log: Move pop_gcmap() into the helper. 
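For readers skimming the patch below: the caller keeps doing push_gcmap(store=True) before jumping to the slow path, and popping the gcmap now happens inside the shared cond_call_slowpath helper just before it returns. A toy sketch of that pairing, in plain Python with invented names (this is only an illustration of the idea, not the real x86 backend code):

    class ToyCondCall(object):
        """Toy model of the caller/slow-path split; not the real backend."""

        def __init__(self):
            self.gcmap_stack = []

        def push_gcmap(self, gcmap):
            self.gcmap_stack.append(gcmap)      # done at the call site

        def pop_gcmap(self):
            return self.gcmap_stack.pop()       # after this patch: done by the helper

        def cond_call_slowpath(self, func):
            func()                              # stands in for save regs, call, restore regs
            self.pop_gcmap()                    # balances the caller's push before "RET"

        def cond_call(self, gcmap, func):
            self.push_gcmap(gcmap)              # push_gcmap(store=True) stays in the caller
            self.cond_call_slowpath(func)       # popping is now left to the helper

    ToyCondCall().cond_call("dummy-gcmap", lambda: None)
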
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -180,6 +180,7 @@ self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) + self.pop_gcmap(mc) # push_gcmap(store=True) done by the caller mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) @@ -2206,11 +2207,13 @@ floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] self.mc.CALL(imm(cond_call_adr)) - self.pop_gcmap(self.mc) - # never any result value + # restoring the registers saved above, and doing pop_gcmap(), is left + # to the cond_call_slowpath helper. We never have any result value. offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) + # XXX if the next operation is a GUARD_NO_EXCEPTION, we should + # somehow jump over it too in the fast path def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned From noreply at buildbot.pypy.org Mon Jul 29 16:36:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 16:36:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Better docstring Message-ID: <20130729143624.AAEBA1C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65790:c70b14d46ab9 Date: 2013-07-29 16:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c70b14d46ab9/ Log: Better docstring diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py --- a/rpython/translator/goal/targetjitstandalone.py +++ b/rpython/translator/goal/targetjitstandalone.py @@ -28,7 +28,9 @@ def entry_point(argv): if len(argv) < 3: - print "Usage: jitstandalone " + print "Usage: jitstandalone " + print "runs a total of '2 * count1 * count2' iterations" + return 0 count1 = int(argv[1]) count2 = int(argv[2]) s = 0 From noreply at buildbot.pypy.org Mon Jul 29 16:55:16 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 29 Jul 2013 16:55:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix ptr_eq and add a test for it in zrpy_gc_test.py Message-ID: <20130729145516.2F1381C00B1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65791:796f9faedadc Date: 2013-07-29 16:54 +0200 http://bitbucket.org/pypy/pypy/changeset/796f9faedadc/ Log: fix ptr_eq and add a test for it in zrpy_gc_test.py diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -74,10 +74,10 @@ ("translation.gctransformer", "boehm")], "minimark": [("translation.gctransformer", "framework")], "stmgc": [("translation.gctransformer", "framework"), - ("translation.gcrootfinder", "stm")], + ("translation.gcrootfinder", "stm"), + ("translation.gcremovetypeptr", False)], }, suggests = { - "stmgc": [("translation.gcremovetypeptr", True)], }, cmdline="--gc"), ChoiceOption("gctransformer", "GC transformer that is used - internal", diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -45,9 +45,11 @@ rgc.collect() rgc.collect(); rgc.collect() freed = 0 - for r in r_list: + for i, r in enumerate(r_list): if r() is 
None: freed += 1 + else: + print "not freed:", r(), "pos:", i print freed return 0 @@ -79,10 +81,11 @@ if gcrootfinder == 'stm': t.config.translation.stm = True t.config.translation.gc = 'stmgc' + gc = 'stmgc' else: t.config.translation.gc = gc # - if gc != 'boehm': + if gc != 'boehm' and gc != 'stmgc': t.config.translation.gcremovetypeptr = True for name, value in kwds.items(): setattr(t.config.translation, name, value) @@ -777,3 +780,33 @@ def test_compile_framework_call_assembler(self): self.run('compile_framework_call_assembler') + + def define_compile_framework_ptr_eq(cls): + # test ptr_eq + def raiseassert(cond): + if not bool(cond): + raise AssertionError + + def before(n, x): + x0 = X() + x1 = X() + ptrs = [None, x0, x1, X()] + return (n, x, x0, x1, None, None, None, + None, None, None, ptrs, None) + + @unroll_safe + def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s): + raiseassert(x0 != ptrs[0]) + raiseassert(x0 == ptrs[1]) + raiseassert(x0 != ptrs[2]) + raiseassert(x0 != ptrs[3]) + raiseassert(x1 != ptrs[0]) + raiseassert(x1 != ptrs[1]) + raiseassert(x1 == ptrs[2]) + raiseassert(x1 != ptrs[3]) + # + return n - 1, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s + return before, f, None + + def test_compile_framework_ptr_eq(self): + self.run('compile_framework_ptr_eq') diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1059,7 +1059,7 @@ if not self.cpu.gc_ll_descr.stm: self.genop_guard_int_eq(op, guard_op, guard_token, arglocs, result_loc) - assert not self.cpu.gc_ll_descr.stm + assert self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) if guard_opnum == rop.GUARD_FALSE: @@ -1072,7 +1072,7 @@ if not self.cpu.gc_ll_descr.stm: self.genop_guard_int_ne(op, guard_op, guard_token, arglocs, result_loc) - assert not self.cpu.gc_ll_descr.stm + assert self.cpu.gc_ll_descr.stm guard_opnum = guard_op.getopnum() self._stm_ptr_eq_fastpath(self.mc, arglocs, result_loc) if guard_opnum == rop.GUARD_FALSE: @@ -2179,7 +2179,8 @@ mc.CMP(X86_64_SCRATCH_REG, b_base) mc.SET_ir(rx86.Conditions['Z'], sl.value) mc.MOVZX8_rr(X86_64_SCRATCH_REG.value, sl.value) - mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + # mc.TEST8_rr() without movzx8 + mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) mc.J_il8(rx86.Conditions['NZ'], 0) j_ok1 = mc.get_relative_pos() @@ -2188,7 +2189,7 @@ mc.J_il8(rx86.Conditions['Z'], 0) j_ok2 = mc.get_relative_pos() # - mc.CMP(a_base, imm(0)) + mc.CMP(b_base, imm(0)) mc.J_il8(rx86.Conditions['Z'], 0) j_ok3 = mc.get_relative_pos() @@ -2203,7 +2204,6 @@ func = self.ptr_eq_slowpath mc.CALL(imm(func)) # result still on stack - assert isinstance(result_loc, RegLoc) mc.POP_r(X86_64_SCRATCH_REG.value) # set flags: mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) From noreply at buildbot.pypy.org Mon Jul 29 17:17:22 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 17:17:22 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130729151722.F31351C0D35@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65792:33ad6ccc9a91 Date: 2013-07-29 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/33ad6ccc9a91/ Log: Fix. 
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -207,7 +207,7 @@ def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(self._op_val(space, w_sub)) >= 0) + return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) return StringMethods.descr_contains(self, space, w_sub) @unwrap_spec(count=int) From noreply at buildbot.pypy.org Mon Jul 29 17:17:24 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 17:17:24 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Use space.listview_{str, unicode} in descr_join(). Message-ID: <20130729151724.3CAEC1C0D35@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65793:f0078474599f Date: 2013-07-29 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/f0078474599f/ Log: Use space.listview_{str,unicode} in descr_join(). diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -387,11 +387,21 @@ @specialize.argtype(0) def descr_join(self, space, w_list): - #l = space.listview_str(w_list) - #if l is not None: - # if len(l) == 1: - # return space.wrap(l[0]) - # return space.wrap(self._val(space).join(l)) + from pypy.objspace.std.bytesobject import W_BytesObject + from pypy.objspace.std.unicodeobject import W_UnicodeObject + + if isinstance(self, W_BytesObject): + l = space.listview_str(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + elif isinstance(self, W_UnicodeObject): + l = space.listview_unicode(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) list_w = space.listview(w_list) size = len(list_w) From noreply at buildbot.pypy.org Mon Jul 29 17:17:25 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 17:17:25 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix translation. Message-ID: <20130729151725.6C3AE1C0D35@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65794:3788cc2a0262 Date: 2013-07-29 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/3788cc2a0262/ Log: Fix translation. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -206,6 +206,8 @@ def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): + from pypy.objspace.std.unicodeobject import W_UnicodeObject + assert isinstance(w_sub, W_UnicodeObject) self_as_unicode = unicode_from_encoded_object(space, self, None, None) return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) return StringMethods.descr_contains(self, space, w_sub) From noreply at buildbot.pypy.org Mon Jul 29 17:17:26 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 17:17:26 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix marshalling of string types. 
Message-ID: <20130729151726.9D5831C0D35@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65795:d0e72051a49f Date: 2013-07-29 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/d0e72051a49f/ Log: Fix marshalling of string types. diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -251,11 +251,11 @@ def PySTRING_CHECK_INTERNED(w_str): return False -def marshal_w__Bytes(space, w_str, m): - # using the fastest possible access method here - # that does not touch the internal representation, - # which might change (array of bytes?) - s = w_str.unwrap(space) +def marshal_bytes(space, w_str, m): + if not isinstance(w_str, W_BytesObject): + raise_exception(space, "unmarshallable object") + + s = space.str_w(w_str) if m.version >= 1 and PySTRING_CHECK_INTERNED(w_str): # we use a native rtyper stringdict for speed idx = m.stringtable.get(s, -1) @@ -267,10 +267,11 @@ m.atom_str(TYPE_INTERNED, s) else: m.atom_str(TYPE_STRING, s) +handled_by_any.append(('str', marshal_bytes)) -def unmarshal_String(space, u, tc): +def unmarshal_bytes(space, u, tc): return space.wrap(u.get_str()) -register(TYPE_STRING, unmarshal_String) +register(TYPE_STRING, unmarshal_bytes) def unmarshal_interned(space, u, tc): w_ret = space.wrap(u.get_str()) @@ -410,13 +411,16 @@ return space.wrap(code) register(TYPE_CODE, unmarshal_pycode) -def marshal_w__Unicode(space, w_unicode, m): +def marshal_unicode(space, w_unicode, m): + if not isinstance(w_unicode, W_UnicodeObject): + raise_exception(space, "unmarshallable object") s = unicodehelper.encode_utf8(space, space.unicode_w(w_unicode)) m.atom_str(TYPE_UNICODE, s) +handled_by_any.append(('unicode', marshal_unicode)) -def unmarshal_Unicode(space, u, tc): +def unmarshal_unicode(space, u, tc): return space.wrap(unicodehelper.decode_utf8(space, u.get_str())) -register(TYPE_UNICODE, unmarshal_Unicode) +register(TYPE_UNICODE, unmarshal_unicode) app = gateway.applevel(r''' def tuple_to_set(datalist, frozen=False): From noreply at buildbot.pypy.org Mon Jul 29 17:17:28 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 29 Jul 2013 17:17:28 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130729151728.5906A1C0D35@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65796:d42a088324cb Date: 2013-07-29 17:12 +0200 http://bitbucket.org/pypy/pypy/changeset/d42a088324cb/ Log: Fix. 
diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -515,6 +515,9 @@ space.wrap("empty separator")) pos = value.rfind(sub) if pos == -1: + from pypy.objspace.std.bytearrayobject import W_BytearrayObject + if isinstance(self, W_BytearrayObject): + self = self._new(value) return space.newtuple([self._empty(), self._empty(), self]) else: from pypy.objspace.std.bytearrayobject import W_BytearrayObject From noreply at buildbot.pypy.org Mon Jul 29 22:36:51 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 29 Jul 2013 22:36:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Removed some more OO specific code Message-ID: <20130729203651.3B4321C00B1@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65797:1b006d0e4bcd Date: 2013-07-29 13:36 -0700 http://bitbucket.org/pypy/pypy/changeset/1b006d0e4bcd/ Log: Removed some more OO specific code diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -187,10 +187,6 @@ # with Voids removed raise NotImplementedError - def methdescrof(self, SELFTYPE, methname): - # must return a subclass of history.AbstractMethDescr - raise NotImplementedError - def typedescrof(self, TYPE): raise NotImplementedError diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -34,7 +34,6 @@ return 'int' # singlefloats are stored in an int if TYPE in (lltype.Float, lltype.SingleFloat): raise NotImplementedError("type %s not supported" % TYPE) - # XXX fix this for oo... if (TYPE != llmemory.Address and rffi.sizeof(TYPE) > rffi.sizeof(lltype.Signed)): if supports_longlong and TYPE is not lltype.LongFloat: @@ -168,18 +167,11 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + class BasicFailDescr(AbstractFailDescr): def __init__(self, identifier=None): self.identifier = identifier # for testing -class AbstractMethDescr(AbstractDescr): - # the base class of the result of cpu.methdescrof() - jitcodes = None - def setup(self, jitcodes): - # jitcodes maps { runtimeClass -> jitcode for runtimeClass.methname } - self.jitcodes = jitcodes - def get_jitcode_for_class(self, oocls): - return self.jitcodes[oocls] class Const(AbstractValue): __slots__ = () diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -68,14 +68,12 @@ uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__uint2singlefloat") + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__singlefloat2uint") + _nowrapper=True, elidable_function=True, sandboxsafe=True) class Float2LongLongEntry(ExtRegistryEntry): diff --git a/rpython/rlib/rlocale.py b/rpython/rlib/rlocale.py --- a/rpython/rlib/rlocale.py +++ b/rpython/rlib/rlocale.py @@ -193,11 +193,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = 
external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') -isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') -islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') -tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') -isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') +isalpha = external('isalpha', [rffi.INT], rffi.INT) +isupper = external('isupper', [rffi.INT], rffi.INT) +islower = external('islower', [rffi.INT], rffi.INT) +tolower = external('tolower', [rffi.INT], rffi.INT) +isalnum = external('isalnum', [rffi.INT], rffi.INT) if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -62,8 +62,8 @@ compilation_info=ExternalCompilationInfo(), sandboxsafe=False, threadsafe='auto', _nowrapper=False, calling_conv='c', - oo_primitive=None, elidable_function=False, - macro=None, random_effects_on_gcobjs='auto'): + elidable_function=False, macro=None, + random_effects_on_gcobjs='auto'): """Build an external function that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -97,8 +97,6 @@ if elidable_function: _callable._elidable_function_ = True kwds = {} - if oo_primitive: - kwds['oo_primitive'] = oo_primitive has_callback = False for ARG in args: From noreply at buildbot.pypy.org Mon Jul 29 22:42:18 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 29 Jul 2013 22:42:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Some more OO specific code Message-ID: <20130729204218.6C0211C00B1@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r65798:2f7eaa5b4466 Date: 2013-07-29 13:41 -0700 http://bitbucket.org/pypy/pypy/changeset/2f7eaa5b4466/ Log: Some more OO specific code diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -185,11 +185,8 @@ SetEndOfFile = rffi.llexternal('SetEndOfFile', [HANDLE], BOOL, compilation_info=_eci) - # HACK: These implementations are specific to MSVCRT and the C backend. - # When generating on CLI or JVM, these are patched out. - # See PyPyTarget.target() in targetpypystandalone.py def _setfd_binary(fd): - #Allow this to succeed on invalid fd's + # Allow this to succeed on invalid fd's if rposix.is_valid_fd(fd): _setmode(fd, os.O_BINARY) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -58,7 +58,6 @@ self.classdef_to_pytypeobject = {} self.concrete_calltables = {} self.class_pbc_attributes = {} - self.oo_meth_impls = {} self.cache_dummy_values = {} self.lltype2vtable = {} self.typererrors = [] diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -97,9 +97,8 @@ def test_remove_unaryops(): - # We really want to use remove_unaryops for things like ooupcast and - # oodowncast in dynamically typed languages, but it's easier to test - # it with operations on ints here. + # We really want to use remove_unaryops for more complex operations, but + # it's easier to test it with operations on ints here. 
def f(x): i = llop.int_invert(lltype.Signed, x) i = llop.int_add(lltype.Signed, x, 1) From noreply at buildbot.pypy.org Mon Jul 29 22:44:44 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 29 Jul 2013 22:44:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130729204444.363521C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65799:2ff994785f16 Date: 2013-07-29 13:38 -0700 http://bitbucket.org/pypy/pypy/changeset/2ff994785f16/ Log: merge default diff too long, truncating to 2000 out of 48190 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -50,11 +50,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -332,10 +327,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -343,10 +334,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. 
There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. 
- - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. 
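The naive lowering just described can be sketched in a few lines of Python; the
helper name and the instruction tuples below are purely illustrative and do not
correspond to GenCLI's real classes or API::

    def render_operation(result, opname, args):
        # lower one SSI-style operation, e.g. v2 = int_add(v0, v1),
        # into explicit stack instructions
        code = []
        for arg in args:
            code.append(('LOAD', arg))    # push each argument on the stack
        code.append((opname,))            # the operation pops them...
        code.append(('STORE', result))    # ...and the result is stored back
        return code

    assert render_operation('v2', 'int_add', ['v0', 'v1']) == [
        ('LOAD', 'v0'), ('LOAD', 'v1'), ('int_add',), ('STORE', 'v2')]
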
- -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. 
- -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. 
- -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. 
- -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. - -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. 
diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. 
_JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). 
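As a rough illustration of the proposed split, a pure-Python mock of such a
``Pair`` might delegate its identity to its first member; the class below is an
assumption drawn from the discussion above, not code that exists in the PyPy
tree::

    class Pair(object):
        def __init__(self, value, behaviour):
            self.value = value          # boxed, platform-level state (e.g. a System.Int32)
            self.behaviour = behaviour  # shared behavioural part (methods, meta)

        def same_identity(self, other):
            # the identity of a Pair is meant to be the identity of its first
            # member, so two Pairs holding the same boxed value are
            # indistinguishable at the RPython level
            return self.value is other.value
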
- -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. 
In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. 
What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. 
- -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. - Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. 
**This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. - - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. 
- -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. - -As usual, it is possible to convert an object from type to type only -under certain conditions; there is a number of predefined conversions -between primitive types such as from ``Bool`` to ``Signed`` or from -``Signed`` to ``Float``. For each one of these conversions there is a -corresponding low level operation, such as ``cast_bool_to_int`` and -``cast_int_to_float``. - -Moreover it is possible to cast instances of a class up and down the -inheritance hierarchy with the ``ooupcast`` and ``oodowncast`` low -level operations. Implicit upcasting is not allowed, so you really -need to do a ``ooupcast`` for converting from a subclass to a -superclass. - -With this design statically typed backends can trivially insert -appropriate casts when needed, while dynamically typed backends can -simply ignore some of the operation such as ``ooupcast`` and -``oodowncast``. Backends that supports implicit upcasting, such as CLI -and Java, can simply ignore only ``ooupcast``. - -Object model -++++++++++++ - -The object model implemented by ootype is quite Java-like. The -following is a list of key features of the ootype object model which -have a direct correspondence in the Java or .NET object model: - - - classes have a static set of strongly typed methods and - attributes; - - - methods can be overriden in subclasses; every method is "virtual" - (i.e., can be overridden); methods can be "abstract" (i.e., need - to be overridden in subclasses); - - - classes support single inheritance; all classes inherit directly - or indirectly from the ROOT class; - - - there is some support for method overloading. 
This feature is not - used by the RTyper itself because RPython doesn't support method - overloading, but it is used by the GenCLI backend for offering - access to the native .NET libraries (see XXX); - - - all classes, attributes and methods are public: ootype is only - used internally by the translator, so there is no need to enforce - accessibility rules; - - - classes and functions are first-class order objects: this feature - can be easily simulated by backends for platforms on which it is not - a native feature; - - - there is a set of `built-in types`_ offering standard features. - -Exception handling -++++++++++++++++++ - -Since flow graphs are meant to be used also for very low level -backends such as C, they are quite unstructured: this means that the -target platform doesn't need to have a native exception handling -mechanism, since at the very least the backend can handle exceptions -just like ``genc`` does. - -By contrast we know that most of high level platforms natively support -exception handling, so *ootypesystem* is designed to let them to use -it. In particular the exception instances are typed with the -``Instance`` type, so the usual inheritance exception hierarchy is -preserved and the native way to catch exception should just work. - -.. `built-in types`_ - -Built-in types -++++++++++++++ - -It seems reasonable to assume high level platforms to provide built-in -facilities for common types such as *lists* or *hashtables*. - -RPython standard types such as ``List`` and ``Dict`` are implemented -on top of these common types; at the moment of writing there are six -built-in types: - - - **String**: self-descriptive - - - **StringBuilder**: used for dynamic building of string - - - **List**: a variable-sized, homogeneous list of object - - - **Dict**: a hashtable of homogeneous keys and values - - - **CustomDict**: same as dict, but with custom equal and hash - functions - - - **DictItemsIterator**: a helper class for iterating over the - elements of a ``Dict`` - - -Each of these types is a subtype of ``BuiltinADTType`` and has set of -ADT (Abstract Data Type) methods (hence the name of the base class) -for being manipulated. Examples of ADT methods are ``ll_length`` for -``List`` and ``ll_get`` for ``Dict``. - -From the backend point of view an instance of a built-in types is -treated exactly as a plain ``Instance``, so usually no special-casing -is needed. The backend is supposed to provide a bunch of classes -wrapping the native ones in order to provide the right signature and -semantic for the ADT methods. - -As an alternative, backends can special-case the ADT types to map them -directly to the native equivalent, translating the method names -on-the-fly at compile time. - -Generics -++++++++ - -Some target platforms offer native support for **generics**, i.e. -classes that can be parametrized on types, not only values. For -example, if one wanted to create a list using generics, a possible -declaration would be to say ``List``, where ``T`` represented the -type. When instantiated, one could create ``List`` or -``List``. The list is then treated as a list of whichever type -is specified. 
- -Each subclass of ``BuiltinADTTypes`` defines a bunch of type -parameters by creating some class level placeholder in the form of -``PARAMNAME_T``; then it fills up the ``_GENERIC_METHODS`` attribute -by defining the signature of each of the ADT methods using those From noreply at buildbot.pypy.org Mon Jul 29 22:44:45 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 29 Jul 2013 22:44:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: apply 9fad3a8b4208 from default Message-ID: <20130729204445.88A411C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65800:c6070a1abed2 Date: 2013-07-29 13:42 -0700 http://bitbucket.org/pypy/pypy/changeset/c6070a1abed2/ Log: apply 9fad3a8b4208 from default diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from .sysconfig_cpython import ( From noreply at buildbot.pypy.org Mon Jul 29 23:03:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 29 Jul 2013 23:03:13 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Thanks Romain for the booking Message-ID: <20130729210313.8840D1C00B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5013:2511be10f422 Date: 2013-07-29 23:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/2511be10f422/ Log: Thanks Romain for the booking diff --git a/sprintinfo/london-2013/people.txt b/sprintinfo/london-2013/people.txt --- a/sprintinfo/london-2013/people.txt +++ b/sprintinfo/london-2013/people.txt @@ -12,10 +12,10 @@ ==================== ============== ======================= Carl Friedrich Bolz ? Lukas Lukas Diekmann lives there -Romain Guillebert ? ? +Romain Guillebert ? hotel LSE Northumberl. Laurence Tratt lives there Edd Barrett ? ? -Armin Rigo ? hotel I'd like to share +Armin Rigo 25/8-2/9 hotel LSE Northumberl. Richard Emslie 25/8-2/9 some hotel Remi Meier 24/8-1/9 ? Marko Bencun 24/8-1/9 ? 
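The patch above replaces the plain string split of ``CPPFLAGS``, ``CFLAGS`` and ``LDFLAGS`` with ``shlex.split``, so that quoted arguments containing spaces survive as single compiler flags instead of being broken apart. A minimal sketch of the difference, not part of the committed diff (the ``CFLAGS`` value below is invented purely for illustration)::

    import os
    import shlex

    # hypothetical environment value with a quoted path containing a space
    os.environ["CFLAGS"] = '-O2 -I"/opt/extra include/dir"'

    os.environ["CFLAGS"].split()
    # ['-O2', '-I"/opt/extra', 'include/dir"']   -- quoted path broken in two

    shlex.split(os.environ["CFLAGS"])
    # ['-O2', '-I/opt/extra include/dir']        -- quoting handled as a shell would
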
From noreply at buildbot.pypy.org Mon Jul 29 23:52:36 2013 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 29 Jul 2013 23:52:36 +0200 (CEST) Subject: [pypy-commit] pypy default: test, fix for uninitialized Scalar.value Message-ID: <20130729215236.EB61E1C00B1@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65801:78634b86b451 Date: 2013-07-30 00:49 +0300 http://bitbucket.org/pypy/pypy/changeset/78634b86b451/ Log: test, fix for uninitialized Scalar.value diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -37,7 +37,8 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - impl = scalar.Scalar(dtype.base) + w_val = dtype.base.coerce(space, space.wrap(0)) + impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, @@ -79,6 +80,8 @@ if w_val is not None: w_val = dtype.coerce(space, w_val) + else: + w_val = dtype.coerce(space, space.wrap(0)) return W_NDimArray(scalar.Scalar(dtype, w_val)) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -264,6 +264,8 @@ assert a.dtype is dtype(int) a = ndarray([], dtype=float) assert a.shape == () + # test uninitialized value crash? + assert len(str(a)) > 0 def test_ndmin(self): from numpypy import array From noreply at buildbot.pypy.org Tue Jul 30 00:23:50 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Tue, 30 Jul 2013 00:23:50 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4-fixed-class: fixed support for class in stdlib 2.7.4 Message-ID: <20130729222350.07D431C00B1@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-fixed-class Changeset: r65802:ab9512fa99c0 Date: 2013-07-29 10:23 -0300 http://bitbucket.org/pypy/pypy/changeset/ab9512fa99c0/ Log: fixed support for class in stdlib 2.7.4 diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -116,6 +116,9 @@ return None def descr_getattribute(self, space, w_attr): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = unwrap_attr(space, w_attr) if name and name[0] == "_": if name == "__dict__": @@ -137,6 +140,9 @@ return space.call_function(w_descr_get, w_value, space.w_None, self) def descr_setattr(self, space, w_attr, w_value): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = unwrap_attr(space, w_attr) if name and name[0] == "_": if name == "__dict__": @@ -370,6 +376,9 @@ return None def descr_getattribute(self, space, w_attr): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': if name == "__dict__": @@ -379,6 +388,9 @@ return self.getattr(space, name) def descr_setattr(self, space, w_name, w_value): + if not space.isinstance_w(w_name, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, 
space.wrap(msg)) name = unwrap_attr(space, w_name) w_meth = self.getattr_from_class(space, '__setattr__') if name and name[0] == "_": diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1078,6 +1078,13 @@ b = 2 assert self.is_strdict(A) + def test_attr_slots(self): + class C: + pass + for c in C, C(): + raises(TypeError, type(c).__getattribute__, c, []) + raises(TypeError, type(c).__setattr__, c, [], []) + class AppTestOldStyleMapDict(AppTestOldstyle): spaceconfig = {"objspace.std.withmapdict": True} From noreply at buildbot.pypy.org Tue Jul 30 00:23:51 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 00:23:51 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged in andrewsmedina/numpypy/stdlib-2.7.4-fixed-class (pull request #172) Message-ID: <20130729222351.7261F1C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r65803:37f208085f71 Date: 2013-07-29 15:23 -0700 http://bitbucket.org/pypy/pypy/changeset/37f208085f71/ Log: Merged in andrewsmedina/numpypy/stdlib-2.7.4-fixed-class (pull request #172) fixed support for class in stdlib 2.7.4 diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -116,6 +116,9 @@ return None def descr_getattribute(self, space, w_attr): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = unwrap_attr(space, w_attr) if name and name[0] == "_": if name == "__dict__": @@ -137,6 +140,9 @@ return space.call_function(w_descr_get, w_value, space.w_None, self) def descr_setattr(self, space, w_attr, w_value): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = unwrap_attr(space, w_attr) if name and name[0] == "_": if name == "__dict__": @@ -370,6 +376,9 @@ return None def descr_getattribute(self, space, w_attr): + if not space.isinstance_w(w_attr, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = space.str_w(w_attr) if len(name) >= 8 and name[0] == '_': if name == "__dict__": @@ -379,6 +388,9 @@ return self.getattr(space, name) def descr_setattr(self, space, w_name, w_value): + if not space.isinstance_w(w_name, space.w_str): + msg = "attribute name must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) name = unwrap_attr(space, w_name) w_meth = self.getattr_from_class(space, '__setattr__') if name and name[0] == "_": diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1078,6 +1078,13 @@ b = 2 assert self.is_strdict(A) + def test_attr_slots(self): + class C: + pass + for c in C, C(): + raises(TypeError, type(c).__getattribute__, c, []) + raises(TypeError, type(c).__setattr__, c, [], []) + class AppTestOldStyleMapDict(AppTestOldstyle): spaceconfig = {"objspace.std.withmapdict": True} From noreply at buildbot.pypy.org Tue Jul 30 00:33:35 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Tue, 30 Jul 2013 00:33:35 +0200 (CEST) 
Subject: [pypy-commit] pypy stdlib-2.7.4-fixed-io: fixed support for io in stdlib 2.7.4 Message-ID: <20130729223335.D7B511C00B1@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-fixed-io Changeset: r65804:43221a2e8365 Date: 2013-07-28 18:46 -0300 http://bitbucket.org/pypy/pypy/changeset/43221a2e8365/ Log: fixed support for io in stdlib 2.7.4 diff --git a/lib-python/2.7/test/test_io.py b/lib-python/2.7/test/test_io.py --- a/lib-python/2.7/test/test_io.py +++ b/lib-python/2.7/test/test_io.py @@ -1004,6 +1004,7 @@ support.gc_collect() self.assertTrue(wr() is None, wr) + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedReader"): @@ -1302,6 +1303,7 @@ with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"123xxx") + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedWriter"): @@ -1676,6 +1678,7 @@ CBufferedReaderTest.test_garbage_collection(self) CBufferedWriterTest.test_garbage_collection(self) + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedRandom"): diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1,7 +1,7 @@ import sys from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, @@ -327,6 +327,13 @@ self.flags = flags self.input = input + +def check_decoded(space, w_decoded): + if not space.isinstance_w(w_decoded, space.w_unicode): + msg = "decoder should return a string result, not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_decoded) + + class W_TextIOWrapper(W_TextIOBase): def __init__(self, space): W_TextIOBase.__init__(self, space) @@ -546,9 +553,15 @@ # Read a chunk, decode it, and put the result in self._decoded_chars w_input = space.call_method(self.w_buffer, "read1", space.wrap(self.chunk_size)) + + if not space.isinstance_w(w_input, space.w_str): + msg = "decoder getstate() should have returned a bytes object not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_input) + eof = space.len_w(w_input) == 0 w_decoded = space.call_method(self.w_decoder, "decode", w_input, space.wrap(eof)) + check_decoded(space, w_decoded) self._set_decoded_chars(space.unicode_w(w_decoded)) if space.len_w(w_decoded) > 0: eof = False @@ -577,10 +590,12 @@ size = convert_size(space, w_size) self._writeflush(space) + if size < 0: # Read everything w_bytes = space.call_method(self.w_buffer, "read") w_decoded = space.call_method(self.w_decoder, "decode", w_bytes, space.w_True) + check_decoded(space, w_decoded) w_result = space.wrap(self._get_decoded_chars(-1)) w_final = space.add(w_result, w_decoded) self.snapshot = None @@ -701,6 +716,10 @@ if not self.w_encoder: raise OperationError(space.w_IOError, space.wrap("not writable")) + if not space.isinstance_w(w_text, space.w_unicode): + msg = "unicode argument expected, got '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_text) + text = space.unicode_w(w_text) textlen = len(text) @@ -845,11 +864,16 @@ # Just like _read_chunk, feed the decoder and save a snapshot. 
w_chunk = space.call_method(self.w_buffer, "read", space.wrap(cookie.bytes_to_feed)) + if not space.isinstance_w(w_chunk, space.w_str): + msg = "underlying read() should have returned a bytes object, not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_chunk) + self.snapshot = PositionSnapshot(cookie.dec_flags, space.str_w(w_chunk)) w_decoded = space.call_method(self.w_decoder, "decode", w_chunk, space.wrap(cookie.need_eof)) + check_decoded(space, w_decoded) self._set_decoded_chars(space.unicode_w(w_decoded)) # Skip chars_to_skip of the decoded characters @@ -918,6 +942,7 @@ while i < len(input): w_decoded = space.call_method(self.w_decoder, "decode", space.wrap(input[i])) + check_decoded(space, w_decoded) chars_decoded += len(space.unicode_w(w_decoded)) cookie.bytes_to_feed += 1 @@ -942,6 +967,7 @@ w_decoded = space.call_method(self.w_decoder, "decode", space.wrap(""), space.wrap(1)) # final=1 + check_decoded(space, w_decoded) chars_decoded += len(space.unicode_w(w_decoded)) cookie.need_eof = 1 diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -98,6 +98,13 @@ f.close() f2.close() + def test_writelines_error(self): + import _io + txt = _io.TextIOWrapper(_io.BytesIO()) + raises(TypeError, txt.writelines, [1, 2, 3]) + raises(TypeError, txt.writelines, None) + raises(TypeError, txt.writelines, b'abc') + def test_seek(self): import _io f = _io.FileIO(self.tmpfile, 'rb') @@ -195,7 +202,7 @@ space.appexec([space.wrap(str(tmpfile))], """(tmpfile): import io f = io.open(tmpfile, 'w', encoding='ascii') - f.write('42') + f.write(u'42') # no flush() and no close() import sys; sys._keepalivesomewhereobscure = f """) diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -216,6 +216,29 @@ raises(IOError, txt.close) # exception not swallowed assert txt.closed + def test_illegal_decoder(self): + import _io + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.read, 1) + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.readline) + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.read) + + def test_read_nonbytes(self): + import _io + class NonbytesStream(_io.StringIO): + read1 = _io.StringIO.read + t = _io.TextIOWrapper(NonbytesStream(u'a')) + raises(TypeError, t.read, 1) + t = _io.TextIOWrapper(NonbytesStream(u'a')) + raises(TypeError, t.readline) + t = _io.TextIOWrapper(NonbytesStream(u'a')) + t.read() == u'a' + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): From noreply at buildbot.pypy.org Tue Jul 30 00:33:37 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Tue, 30 Jul 2013 00:33:37 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4-fixed-io: pep8 fix Message-ID: <20130729223337.3FE2E1C00B1@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: stdlib-2.7.4-fixed-io Changeset: r65805:2acd05e29823 Date: 2013-07-29 19:30 -0300 http://bitbucket.org/pypy/pypy/changeset/2acd05e29823/ Log: pep8 fix diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -555,7 +555,8 @@ space.wrap(self.chunk_size)) if not space.isinstance_w(w_input, 
space.w_str): - msg = "decoder getstate() should have returned a bytes object not '%T'" + msg = "decoder getstate() should have returned a bytes " \ + "object not '%T'" raise operationerrfmt(space.w_TypeError, msg, w_input) eof = space.len_w(w_input) == 0 @@ -865,7 +866,8 @@ w_chunk = space.call_method(self.w_buffer, "read", space.wrap(cookie.bytes_to_feed)) if not space.isinstance_w(w_chunk, space.w_str): - msg = "underlying read() should have returned a bytes object, not '%T'" + msg = "underlying read() should have returned " \ + "a bytes object, not '%T'" raise operationerrfmt(space.w_TypeError, msg, w_chunk) self.snapshot = PositionSnapshot(cookie.dec_flags, From noreply at buildbot.pypy.org Tue Jul 30 00:33:38 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 00:33:38 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.4: Merged in andrewsmedina/numpypy/stdlib-2.7.4-fixed-io (pull request #170) Message-ID: <20130729223338.9579D1C00B1@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.4 Changeset: r65806:0f6b5939f7a6 Date: 2013-07-29 15:33 -0700 http://bitbucket.org/pypy/pypy/changeset/0f6b5939f7a6/ Log: Merged in andrewsmedina/numpypy/stdlib-2.7.4-fixed-io (pull request #170) fixed support for io in stdlib 2.7.4 diff --git a/lib-python/2.7/test/test_io.py b/lib-python/2.7/test/test_io.py --- a/lib-python/2.7/test/test_io.py +++ b/lib-python/2.7/test/test_io.py @@ -1004,6 +1004,7 @@ support.gc_collect() self.assertTrue(wr() is None, wr) + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedReader"): @@ -1302,6 +1303,7 @@ with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"123xxx") + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedWriter"): @@ -1676,6 +1678,7 @@ CBufferedReaderTest.test_garbage_collection(self) CBufferedWriterTest.test_garbage_collection(self) + @support.impl_detail(cpython=True) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedRandom"): diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -1,7 +1,7 @@ import sys from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, @@ -327,6 +327,13 @@ self.flags = flags self.input = input + +def check_decoded(space, w_decoded): + if not space.isinstance_w(w_decoded, space.w_unicode): + msg = "decoder should return a string result, not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_decoded) + + class W_TextIOWrapper(W_TextIOBase): def __init__(self, space): W_TextIOBase.__init__(self, space) @@ -546,9 +553,16 @@ # Read a chunk, decode it, and put the result in self._decoded_chars w_input = space.call_method(self.w_buffer, "read1", space.wrap(self.chunk_size)) + + if not space.isinstance_w(w_input, space.w_str): + msg = "decoder getstate() should have returned a bytes " \ + "object not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_input) + eof = space.len_w(w_input) == 0 w_decoded = space.call_method(self.w_decoder, "decode", w_input, space.wrap(eof)) + 
check_decoded(space, w_decoded) self._set_decoded_chars(space.unicode_w(w_decoded)) if space.len_w(w_decoded) > 0: eof = False @@ -577,10 +591,12 @@ size = convert_size(space, w_size) self._writeflush(space) + if size < 0: # Read everything w_bytes = space.call_method(self.w_buffer, "read") w_decoded = space.call_method(self.w_decoder, "decode", w_bytes, space.w_True) + check_decoded(space, w_decoded) w_result = space.wrap(self._get_decoded_chars(-1)) w_final = space.add(w_result, w_decoded) self.snapshot = None @@ -701,6 +717,10 @@ if not self.w_encoder: raise OperationError(space.w_IOError, space.wrap("not writable")) + if not space.isinstance_w(w_text, space.w_unicode): + msg = "unicode argument expected, got '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_text) + text = space.unicode_w(w_text) textlen = len(text) @@ -845,11 +865,17 @@ # Just like _read_chunk, feed the decoder and save a snapshot. w_chunk = space.call_method(self.w_buffer, "read", space.wrap(cookie.bytes_to_feed)) + if not space.isinstance_w(w_chunk, space.w_str): + msg = "underlying read() should have returned " \ + "a bytes object, not '%T'" + raise operationerrfmt(space.w_TypeError, msg, w_chunk) + self.snapshot = PositionSnapshot(cookie.dec_flags, space.str_w(w_chunk)) w_decoded = space.call_method(self.w_decoder, "decode", w_chunk, space.wrap(cookie.need_eof)) + check_decoded(space, w_decoded) self._set_decoded_chars(space.unicode_w(w_decoded)) # Skip chars_to_skip of the decoded characters @@ -918,6 +944,7 @@ while i < len(input): w_decoded = space.call_method(self.w_decoder, "decode", space.wrap(input[i])) + check_decoded(space, w_decoded) chars_decoded += len(space.unicode_w(w_decoded)) cookie.bytes_to_feed += 1 @@ -942,6 +969,7 @@ w_decoded = space.call_method(self.w_decoder, "decode", space.wrap(""), space.wrap(1)) # final=1 + check_decoded(space, w_decoded) chars_decoded += len(space.unicode_w(w_decoded)) cookie.need_eof = 1 diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py --- a/pypy/module/_io/test/test_fileio.py +++ b/pypy/module/_io/test/test_fileio.py @@ -98,6 +98,13 @@ f.close() f2.close() + def test_writelines_error(self): + import _io + txt = _io.TextIOWrapper(_io.BytesIO()) + raises(TypeError, txt.writelines, [1, 2, 3]) + raises(TypeError, txt.writelines, None) + raises(TypeError, txt.writelines, b'abc') + def test_seek(self): import _io f = _io.FileIO(self.tmpfile, 'rb') @@ -195,7 +202,7 @@ space.appexec([space.wrap(str(tmpfile))], """(tmpfile): import io f = io.open(tmpfile, 'w', encoding='ascii') - f.write('42') + f.write(u'42') # no flush() and no close() import sys; sys._keepalivesomewhereobscure = f """) diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -216,6 +216,29 @@ raises(IOError, txt.close) # exception not swallowed assert txt.closed + def test_illegal_decoder(self): + import _io + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.read, 1) + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.readline) + t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n', + encoding='quopri_codec') + raises(TypeError, t.read) + + def test_read_nonbytes(self): + import _io + class NonbytesStream(_io.StringIO): + read1 = _io.StringIO.read + t = _io.TextIOWrapper(NonbytesStream(u'a')) + raises(TypeError, t.read, 1) + t 
= _io.TextIOWrapper(NonbytesStream(u'a')) + raises(TypeError, t.readline) + t = _io.TextIOWrapper(NonbytesStream(u'a')) + t.read() == u'a' + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): From noreply at buildbot.pypy.org Tue Jul 30 09:01:58 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 09:01:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: n/a on py3k Message-ID: <20130730070158.92F3C1C346F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r65807:c0832f4a3a98 Date: 2013-07-29 23:53 -0700 http://bitbucket.org/pypy/pypy/changeset/c0832f4a3a98/ Log: n/a on py3k diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -986,6 +986,7 @@ # BUILD_LIST_FROM_ARG is PyPy specific @py.test.mark.skipif('config.option.runappdirect') def test_build_list_from_arg_length_hint(self): + py3k_skip('XXX: BUILD_LIST_FROM_ARG list comps are genexps on py3k') hint_called = [False] class Foo(object): def __length_hint__(self): From noreply at buildbot.pypy.org Tue Jul 30 09:02:08 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 09:02:08 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: merge py3k Message-ID: <20130730070208.63E7B1C346F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65808:f2ffcfcdbe0b Date: 2013-07-29 23:54 -0700 http://bitbucket.org/pypy/pypy/changeset/f2ffcfcdbe0b/ Log: merge py3k diff too long, truncating to 2000 out of 49976 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from .sysconfig_cpython import ( diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. 
N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. 
vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -50,11 +50,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -332,10 +327,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -343,10 +334,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. 
- -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. 
There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. 
- -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. 
- - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. 
- -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. 
- -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. 
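Returning to the overload resolution described in the ``clr`` module documentation above: the selection rule can be approximated in a few lines of plain Python. This is only an illustrative sketch of the idea (an exact type match wins, otherwise a single usable candidate is accepted), not the actual PyPy.NET resolver::

    def best_overload(overloads, args):
        """'overloads' maps a tuple of parameter types to a callable."""
        argtypes = tuple(type(a) for a in args)
        if argtypes in overloads:           # exact match wins
            return overloads[argtypes]
        usable = [func for sig, func in overloads.items()
                  if len(sig) == len(args) and
                     all(isinstance(a, t) for a, t in zip(args, sig))]
        if len(usable) == 1:
            return usable[0]
        raise TypeError("No overloads could match")

    overloads = {(int,): lambda x: abs(x),    # stands in for Abs(int32)
                 (float,): lambda x: abs(x)}  # stands in for Abs(float64)
    print best_overload(overloads, (-42,))(-42)      # int version, prints 42
    print best_overload(overloads, (-42.0,))(-42.0)  # float version, prints 42.0

As documented above, the conservative fallback when no single best overload can be chosen is to raise ``TypeError`` rather than guess.
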
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. 
_`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. _JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. 
- -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). - -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) 
- W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. 
for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... 
- except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. - -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... 
- >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. - A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. 
_optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. _`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. 
If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. 
_`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. - Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. **This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. 
- `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst --- a/pypy/doc/release-2.1.0-beta2.rst +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -5,6 +5,10 @@ We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + Highlights ========== diff --git a/pypy/doc/rtyper.rst b/pypy/doc/rtyper.rst --- a/pypy/doc/rtyper.rst +++ b/pypy/doc/rtyper.rst @@ -432,226 +432,6 @@ See for example `rpython/rtyper/rlist.py`_. -.. _`oo type`: - -Object Oriented Types ---------------------- - -The standard `low-level type` model described above is fine for -targeting low level backends such as C, but it is not good -enough for targeting higher level backends such as .NET CLI or Java -JVM, so a new object oriented model has been introduced. This model is -implemented in the first part of `rpython/rtyper/ootypesystem/ootype.py`_. - -As for the low-level typesystem, the second part of -`rpython/rtyper/ootypesystem/ootype.py`_ is a runnable implementation of -these types, for testing purposes. - - -The target platform -+++++++++++++++++++ - -There are plenty of object oriented languages and platforms around, -each one with its own native features: they could be statically or -dynamically typed, they could support or not things like multiple -inheritance, classes and functions as first class order objects, -generics, and so on. - -The goal of *ootypesystem* is to define a trade-off between all -the potential backends that let them to use the native facilities when -available while not preventing other backends to work when they -aren't. - - -Types and classes -+++++++++++++++++ - -Most of the primitive types defined in *ootypesystem* are the very -same of those found in *lltypesystem*: ``Bool``, ``Signed``, -``Unsigned``, ``Float``, ``Char``, ``UniChar`` and ``Void``. - -The target platform is supposed to support classes and instances with -**single inheritance**. Instances of user-defined classes are mapped -to the ``Instance`` type, whose ``_superclass`` attribute indicates -the base class of the instance. At the very beginning of the -inheritance hierarchy there is the ``Root`` object, i.e. the common -base class between all instances; if the target platform has the -notion of a common base class too, the backend can choose to map the -``Root`` class to its native equivalent. - -Object of ``Instance`` type can have attributes and methods: -attributes are got and set by the ``oogetfield`` and ``oosetfield`` -operations, while method calls are expressed by the ``oosend`` -operation. - -Classes are passed around using the ``Class`` type: this is a first -order class type whose only goal is to allow **runtime instantiation** -of the class. Backends that don't support this feature natively, such -as Java, may need to use some sort of placeholder instead. - - -Static vs. dynamic typing -+++++++++++++++++++++++++ - -The target platform is assumed to be **statically typed**, i.e. the -type of each object is known at compile time. 
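To keep some of the removed explanation accessible, here is a tiny plain-Python toy model of the concepts it described: ``Instance`` declarations, ``oosetfield``/``oogetfield`` and ``oosend``. It deliberately does not use the real (now deleted) ``ootype`` API and exists only to illustrate the vocabulary; the numbers follow the ``W_IntObject`` example used earlier in these documents::

    class Instance(object):
        # toy stand-in for ootype.Instance: name, superclass, default fields, methods
        def __init__(self, name, superclass, fields, methods):
            self.name = name
            self._superclass = superclass
            self.fields = dict(fields)       # field name -> default value
            self.methods = dict(methods)     # method name -> callable

    def new(TYPE):
        obj = {'_TYPE': TYPE}
        obj.update(TYPE.fields)
        return obj

    def oosetfield(obj, name, value):
        obj[name] = value

    def oogetfield(obj, name):
        return obj[name]

    def oosend(name, obj, *args):
        return obj['_TYPE'].methods[name](obj, *args)

    ROOT = Instance('Root', None, {}, {})
    W_IntObject = Instance('W_IntObject', ROOT,
                           {'intval': 0},
                           {'foo': lambda self, x: oogetfield(self, 'intval') + x})

    x = new(W_IntObject)
    oosetfield(x, 'intval', 41)
    print oosend('foo', x, 1)    # prints 42
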
From noreply at buildbot.pypy.org Tue Jul 30 09:02:09 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 09:02:09 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: mention arm too, other edits Message-ID: <20130730070209.AAABA1C346F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65809:5b4971cbfc45 Date: 2013-07-29 23:56 -0700 http://bitbucket.org/pypy/pypy/changeset/5b4971cbfc45/ Log: mention arm too, other edits diff --git a/pypy/doc/release-pypy3-2.1.0-beta1.rst b/pypy/doc/release-pypy3-2.1.0-beta1.rst --- a/pypy/doc/release-pypy3-2.1.0-beta1.rst +++ b/pypy/doc/release-pypy3-2.1.0-beta1.rst @@ -3,7 +3,7 @@ ================== We're pleased to announce the first beta of the upcoming 2.1.0 release of -PyPy3. This is the first release of PyPy which targets Python 3.2 +PyPy3. This is the first release of PyPy which targets Python 3 (3.2.3) compatibility. We would like to thank all of the people who donated_ to the `py3k proposal`_ @@ -17,30 +17,25 @@ ========== * The first release of PyPy3: support for Python 3, targetting CPython 3.2.3! - Albeit with a few missing features: - - The stdlib test_memoryview includes some failing tests (marked to - skip) and test_multiprocessing is known to deadlock on some - platforms + - There are some `known issues`_ including performance regressions (issues + `#1540`_ & `#1541`_) slated to be resolved before the final release. - - There are some known performance regressions (issues `#1540`_ & - `#1541`_) slated to be resolved before the final release - - - NumPyPy is currently disabled - -What is PyPy3? +What is PyPy? ============== -PyPy3 is a very compliant Python interpreter, almost a drop-in replacement for -CPython 3.2.3. It's fast due to its integrated tracing JIT compiler. +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.3 or 3.2.3. It's fast due to its integrated tracing JIT compiler. This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows -32. However Windows 32 support could use some improvement. +32. Also this release supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Windows 64 work is still stalling and we would welcome a volunteer to handle that. -How to use PyPy3? +How to use PyPy? ================= We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv @@ -49,6 +44,7 @@ .. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html .. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`known issues`: https://bugs.pypy.org/issue?%40search_text=&title=py3k&%40columns=title&keyword=&id=&%40columns=id&creation=&creator=&release=&activity=&%40columns=activity&%40sort=activity&actor=&priority=&%40group=priority&status=-1%2C1%2C2%2C3%2C4%2C5%2C6&%40columns=status&assignedto=&%40columns=assignedto&%40pagesize=50&%40startwith=0&%40queryname=&%40old-queryname=&%40action=search .. _`#1540`: https://bugs.pypy.org/issue1540 .. _`#1541`: https://bugs.pypy.org/issue1541 .. 
_`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv From noreply at buildbot.pypy.org Tue Jul 30 09:02:11 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 09:02:11 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: Added tag pypy3-release-2.1.0-beta1 for changeset 5b4971cbfc45 Message-ID: <20130730070211.AF1611C346F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65810:d63636b30cc0 Date: 2013-07-29 23:58 -0700 http://bitbucket.org/pypy/pypy/changeset/d63636b30cc0/ Log: Added tag pypy3-release-2.1.0-beta1 for changeset 5b4971cbfc45 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,4 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +5b4971cbfc457cea6b78ab7a5691649fa89e052b pypy3-release-2.1.0-beta1 From noreply at buildbot.pypy.org Tue Jul 30 09:42:52 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 30 Jul 2013 09:42:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix problem in zrpy_gc_test.py (we need to break the transaction in order to free all possible weakref'ed objects) Message-ID: <20130730074252.54B5D1C01F6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65811:200e28316921 Date: 2013-07-30 09:42 +0200 http://bitbucket.org/pypy/pypy/changeset/200e28316921/ Log: fix problem in zrpy_gc_test.py (we need to break the transaction in order to free all possible weakref'ed objects) diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -38,12 +38,23 @@ name = args[1] if argc > 2: n = int(args[2]) + r_list = [] for i in range(20): r = g(name, n) r_list.append(r) rgc.collect() + + if rgc.stm_is_enabled(): + from rpython.rlib import rstm + # this breaks the transaction. necessary to make possible + # weak-reffed private_from_protected objects non-private + # and thereby non-reffed (remove them from some list) + rstm.before_external_call() + rstm.after_external_call() + rgc.collect(); rgc.collect() + freed = 0 for i, r in enumerate(r_list): if r() is None: @@ -51,6 +62,7 @@ else: print "not freed:", r(), "pos:", i print freed + return 0 return entrypoint diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -c528da482152 +0ebfd6dd4f46 diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -70,8 +70,10 @@ do stm_write_barrier() again if we ended the transaction, or if we did a potential collection (e.g. stm_allocate()). */ -static inline gcptr stm_read_barrier(gcptr); -static inline gcptr stm_write_barrier(gcptr); +#if 0 // (optimized version below) +gcptr stm_read_barrier(gcptr); +gcptr stm_write_barrier(gcptr); +#endif /* start a new transaction, calls callback(), and when it returns finish that transaction. 
callback() is called with the 'arg' @@ -140,16 +142,12 @@ /************************************************************/ -/* macro-like functionality */ +/* macro functionality */ extern __thread gcptr *stm_shadowstack; -static inline void stm_push_root(gcptr obj) { - *stm_shadowstack++ = obj; -} -static inline gcptr stm_pop_root(void) { - return *--stm_shadowstack; -} +#define stm_push_root(obj) (*stm_shadowstack++ = (obj)) +#define stm_pop_root() (*--stm_shadowstack) extern __thread revision_t stm_private_rev_num; gcptr stm_DirectReadBarrier(gcptr); @@ -161,21 +159,18 @@ (*(gcptr *)(stm_read_barrier_cache + ((revision_t)(obj) & FX_MASK))) #define UNLIKELY(test) __builtin_expect(test, 0) -static inline gcptr stm_read_barrier(gcptr obj) { - /* XXX optimize to get the smallest code */ - if (UNLIKELY((obj->h_revision != stm_private_rev_num) && - (FXCACHE_AT(obj) != obj))) - obj = stm_DirectReadBarrier(obj); - return obj; -} -static inline gcptr stm_write_barrier(gcptr obj) { - if (UNLIKELY((obj->h_revision != stm_private_rev_num) | - ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) - obj = stm_WriteBarrier(obj); - return obj; -} -#undef UNLIKELY +#define stm_read_barrier(obj) \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) && \ + (FXCACHE_AT(obj) != (obj))) ? \ + stm_DirectReadBarrier(obj) \ + : (obj)) + +#define stm_write_barrier(obj) \ + (UNLIKELY(((obj)->h_revision != stm_private_rev_num) || \ + (((obj)->h_tid & GCFLAG_WRITE_BARRIER) != 0)) ? \ + stm_WriteBarrier(obj) \ + : (obj)) #endif From noreply at buildbot.pypy.org Tue Jul 30 11:47:51 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 30 Jul 2013 11:47:51 +0200 (CEST) Subject: [pypy-commit] pypy default: support const char* (test is a bit imperfect, but I fail to see when Message-ID: <20130730094751.54A0C1C02E4@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65812:19e81f8f9f39 Date: 2013-07-30 11:47 +0200 http://bitbucket.org/pypy/pypy/changeset/19e81f8f9f39/ Log: support const char* (test is a bit imperfect, but I fail to see when it actually explodes) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -649,6 +649,10 @@ # char * CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True})) +# const char * +CONST_CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_const': True})) + # wchar_t * CWCHARP = lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True})) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -358,6 +358,8 @@ self.fullptrtypename = 'void *@' else: self.fullptrtypename = self.itemtypename.replace('@', '*@') + if ARRAY._hints.get("render_as_const"): + self.fullptrtypename = 'const ' + self.fullptrtypename def setup(self): """Array loops are forbidden by ForwardReference.become() because diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.lltypesystem.lltype import * +from rpython.rtyper.lltypesystem import rffi from rpython.translator.c.test.test_genc import compile from rpython.tool.sourcetools import func_with_new_name @@ -314,14 +315,14 @@ from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem 
import rffi, llmemory, lltype P = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1)) - + def f(): a = llstr("xyz") b = (llmemory.cast_ptr_to_adr(a) + llmemory.offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) buf = rffi.cast(rffi.VOIDP, b) return buf[2] - + fn = self.getcompiled(f, []) res = fn() assert res == 'z' @@ -941,3 +942,22 @@ assert fn(0) == 10 assert fn(1) == 10 + 521 assert fn(2) == 10 + 34 + + def test_const_char_star(self): + import os + from rpython.translator.tool.cbuild import ExternalCompilationInfo + + eci = ExternalCompilationInfo(includes=["stdlib.h"]) + atoi = rffi.llexternal('atoi', [rffi.CONST_CCHARP], rffi.INT, + compilation_info=eci) + + def f(n): + s = malloc(rffi.CCHARP.TO, 2, flavor='raw') + s[0] = '9' + s[1] = '\0' + res = atoi(rffi.cast(rffi.CONST_CCHARP, s)) + free(s, flavor='raw') + return res + + fn = self.getcompiled(f, [int]) + assert fn(0) == 9 From noreply at buildbot.pypy.org Tue Jul 30 11:49:19 2013 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 30 Jul 2013 11:49:19 +0200 (CEST) Subject: [pypy-commit] pypy default: kill a useless import Message-ID: <20130730094919.6B6271C337E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r65813:d70b8eb9c481 Date: 2013-07-30 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/d70b8eb9c481/ Log: kill a useless import diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -944,7 +944,6 @@ assert fn(2) == 10 + 34 def test_const_char_star(self): - import os from rpython.translator.tool.cbuild import ExternalCompilationInfo eci = ExternalCompilationInfo(includes=["stdlib.h"]) From noreply at buildbot.pypy.org Tue Jul 30 14:12:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 14:12:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Document the registers on x86-64 in this file too Message-ID: <20130730121220.133E61C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65814:228ee0fda188 Date: 2013-07-30 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/228ee0fda188/ Log: Document the registers on x86-64 in this file too diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -729,6 +729,10 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # +# On 64 bits, it is an array of 7 values instead of 5: +# +# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address +# if IS_64_BITS: CALLEE_SAVED_REGS = 6 From noreply at buildbot.pypy.org Tue Jul 30 14:15:38 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:15:38 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix test_setobject.py. Message-ID: <20130730121538.AA9661C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65815:97254954bebc Date: 2013-07-29 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/97254954bebc/ Log: Fix test_setobject.py. 
diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -81,9 +81,9 @@ assert space.is_true(self.space.eq(result, W_SetObject(space, self.space.wrap("")))) def test_create_set_from_list(self): + from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject - from pypy.objspace.std.model import W_Object w = self.space.wrap intstr = self.space.fromcache(IntegerSetStrategy) @@ -114,7 +114,7 @@ _initialize_set(self.space, w_set, w_list) assert w_set.strategy is self.space.fromcache(ObjectSetStrategy) for item in w_set.strategy.unerase(w_set.sstorage): - assert isinstance(item, W_Object) + assert isinstance(item, W_Root) w_list = W_ListObject(self.space, [w(1.0), w(2.0), w(3.0)]) w_set = W_SetObject(self.space) diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -34,6 +34,7 @@ try: target(*args) except OperationError, e: + raise tb = sys.exc_info()[2] if e.match(space, space.w_KeyboardInterrupt): raise KeyboardInterrupt, KeyboardInterrupt(), tb From noreply at buildbot.pypy.org Tue Jul 30 14:15:39 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:15:39 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix isinstance_w fastpath. Message-ID: <20130730121539.E4BA61C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65816:84267731a9be Date: 2013-07-29 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/84267731a9be/ Log: Fix isinstance_w fastpath. 
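An editorial note on why this registration matters: if I read the std objspace correctly, ``isinstance_w`` uses the ``_interplevel_classes`` table to replace a full app-level type check with a plain interpreter-level ``isinstance()``. Once ``withstrbuf`` introduces a second implementation of ``str``, the registered entry has to be the shared abstract base class, which is what the patch below does. A toy illustration with hypothetical class names (not the real objspace code)::

    class W_Root(object): pass
    class W_AbstractBytesObject(W_Root): pass
    class W_BytesObject(W_AbstractBytesObject): pass
    class W_BytesBufferImpl(W_AbstractBytesObject): pass   # hypothetical second str implementation

    interplevel_classes = {'str': W_AbstractBytesObject}   # the abstract base, not W_BytesObject

    def isinstance_w(w_obj, typename):
        # fast path: a single isinstance() against the registered base class
        return isinstance(w_obj, interplevel_classes[typename])

    print isinstance_w(W_BytesObject(), 'str')       # True
    print isinstance_w(W_BytesBufferImpl(), 'str')   # True; would be False if W_BytesObject were registered
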
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -15,7 +15,8 @@ # Object imports from pypy.objspace.std.boolobject import W_BoolObject -from pypy.objspace.std.bytesobject import W_BytesObject, wrapstr +from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr +from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.complexobject import W_ComplexObject from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.floatobject import W_FloatObject @@ -667,12 +668,19 @@ self._interplevel_classes[w_type] = base # register other things + # XXX: fix automatic registration self._interplevel_classes[self.w_dict] = W_DictMultiObject self._interplevel_classes[self.w_list] = W_ListObject self._interplevel_classes[self.w_set] = W_SetObject self._interplevel_classes[self.w_tuple] = W_AbstractTupleObject self._interplevel_classes[self.w_sequenceiterator] = \ W_AbstractSeqIterObject + if self.config.objspace.std.withstrbuf: + self._interplevel_classes[self.w_str] = W_AbstractBytesObject + else: + self._interplevel_classes[self.w_str] = W_BytesObject + self._interplevel_classes[self.w_bytearray] = W_BytearrayObject + self._interplevel_classes[self.w_unicode] = W_UnicodeObject @specialize.memo() def _get_interplevel_cls(self, w_type): From noreply at buildbot.pypy.org Tue Jul 30 14:15:41 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:15:41 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: oops Message-ID: <20130730121541.2B76B1C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65817:e5d03b989292 Date: 2013-07-29 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/e5d03b989292/ Log: oops diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py --- a/pypy/tool/pytest/apptest.py +++ b/pypy/tool/pytest/apptest.py @@ -34,7 +34,6 @@ try: target(*args) except OperationError, e: - raise tb = sys.exc_info()[2] if e.match(space, space.w_KeyboardInterrupt): raise KeyboardInterrupt, KeyboardInterrupt(), tb From noreply at buildbot.pypy.org Tue Jul 30 14:15:42 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:15:42 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Make descr_splitlines() unicode-aware. Message-ID: <20130730121542.5D0AE1C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65818:93f93f772e11 Date: 2013-07-29 19:40 +0200 http://bitbucket.org/pypy/pypy/changeset/93f93f772e11/ Log: Make descr_splitlines() unicode-aware. 
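Before the diff, a note on what changes: the line-scanning loop now asks a per-implementation ``_islinebreak`` predicate instead of hard-coding ``'\n'`` and ``'\r'``, which lets the unicode type defer to ``unicodedb.islinebreak``. A standalone sketch of the rewritten loop (illustrative only; this version knows just the two ASCII line breaks)::

    def splitlines(value, keepends=False, islinebreak=lambda ch: ch in '\r\n'):
        strs = []
        pos, length = 0, len(value)
        while pos < length:
            sol = pos
            while pos < length and not islinebreak(value[pos]):
                pos += 1
            eol = pos
            pos += 1
            # read CRLF as a single line break
            if pos < length and value[eol] == '\r' and value[pos] == '\n':
                pos += 1
            if keepends:
                eol = pos
            strs.append(value[sol:eol])
        return strs

    print splitlines("ab\r\ncd\rx")          # ['ab', 'cd', 'x']
    print splitlines("ab\r\ncd\rx", True)    # ['ab\r\n', 'cd\r', 'x']
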
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -74,6 +74,9 @@ _iscased = _isalpha + def _islinebreak(self, ch): + return (ch == '\n') or (ch == '\r') + def _upper(self, ch): if ch.islower(): o = ord(ch) - 32 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -117,6 +117,9 @@ _iscased = _isalpha + def _islinebreak(self, ch): + return (ch == '\n') or (ch == '\r') + def _upper(self, ch): if ch.islower(): o = ord(ch) - 32 diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -628,26 +628,24 @@ @unwrap_spec(keepends=bool) @specialize.argtype(0) def descr_splitlines(self, space, keepends=False): - data = self._val(space) - selflen = len(data) + value = self._val(space) + length = len(value) strs = [] - i = j = 0 - while i < selflen: - # Find a line and append it - while i < selflen and data[i] != '\n' and data[i] != '\r': - i += 1 - # Skip the line break reading CRLF as one line break - eol = i - i += 1 - if i < selflen and data[i-1] == '\r' and data[i] == '\n': - i += 1 + pos = 0 + while pos < length: + sol = pos + while pos < length and not self._islinebreak(value[pos]): + pos += 1 + eol = pos + pos += 1 + # read CRLF as one line break + if pos < length and value[eol] == '\r' and value[pos] == '\n': + pos += 1 if keepends: - eol = i - strs.append(data[j:eol]) - j = i - - if j < selflen: - strs.append(data[j:len(data)]) + eol = pos + strs.append(value[sol:eol]) + if pos < length: + strs.append(value[pos:length]) return self._newlist_unwrapped(space, strs) @specialize.argtype(0) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -125,6 +125,9 @@ def _iscased(self, ch): return unicodedb.iscased(ord(ch)) + def _islinebreak(self, ch): + return unicodedb.islinebreak(ord(ch)) + def _upper(self, ch): return unichr(unicodedb.toupper(ord(ch))) From noreply at buildbot.pypy.org Tue Jul 30 14:15:43 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:15:43 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix descr_title(). Message-ID: <20130730121543.8CE801C0359@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65819:3c21a8894676 Date: 2013-07-30 00:14 +0200 http://bitbucket.org/pypy/pypy/changeset/3c21a8894676/ Log: Fix descr_title(). 
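The point of the fix in the changeset below is that title-casing the first cased character of a word is not the same as upper-casing it for unicode (for example the title-case of U+01C6 is U+01C5, not the uppercase U+01C4), so the loop now goes through a dedicated _title hook; for byte strings that hook simply stays _upper. A rough sketch, using plain str methods as stand-ins for the per-implementation hooks::

    def title(value, totitle, tolower, iscased):
        # totitle/tolower/iscased stand in for the _title/_lower/_iscased
        # hooks introduced or reused in the diff below.
        out = []
        previous_is_cased = False
        for ch in value:
            if not previous_is_cased:
                out.append(totitle(ch))
            else:
                out.append(tolower(ch))
            previous_is_cased = iscased(ch)
        return ''.join(out)

    # for byte strings the title-case mapping coincides with upper-casing:
    assert title("hello world", str.upper, str.lower, str.isalpha) == "Hello World"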
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -91,6 +91,8 @@ else: return ch + _title = _upper + def _join_return_one(self, space, w_obj): return False diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -134,6 +134,8 @@ else: return ch + _title = _upper + def _newlist_unwrapped(self, space, lst): return space.newlist_str(lst) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -761,7 +761,7 @@ for pos in range(len(selfval)): ch = selfval[pos] if not previous_is_cased: - builder.append(self._upper(ch)) + builder.append(self._title(ch)) else: builder.append(self._lower(ch)) previous_is_cased = self._iscased(ch) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -134,6 +134,9 @@ def _lower(self, ch): return unichr(unicodedb.tolower(ord(ch))) + def _title(self, ch): + return unichr(unicodedb.totitle(ord(ch))) + def _newlist_unwrapped(self, space, lst): return space.newlist_unicode(lst) From noreply at buildbot.pypy.org Tue Jul 30 14:31:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 14:31:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Not really sure, but I believe this to be a fix Message-ID: <20130730123129.4D2F71C0883@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65820:2cc1812e0ada Date: 2013-07-30 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/2cc1812e0ada/ Log: Not really sure, but I believe this to be a fix diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -799,6 +799,15 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): + # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' + # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. + # We must make sure that edi and esi do not contain GC pointers. 
+ if IS_X86_32 and self.assembler._is_asmgcc(): + for box, loc in self.rm.reg_bindings.items(): + if (loc == edi or loc == esi) and box.type == REF: + self.rm.force_spill_var(box) + assert box not in self.rm.reg_bindings + # assert op.result is None args = op.getarglist() assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments From noreply at buildbot.pypy.org Tue Jul 30 14:35:09 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 30 Jul 2013 14:35:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: hg merge py3k Message-ID: <20130730123509.66FB91C0883@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r65821:65a980b9c1ec Date: 2013-07-30 14:32 +0200 http://bitbucket.org/pypy/pypy/changeset/65a980b9c1ec/ Log: hg merge py3k diff too long, truncating to 2000 out of 51951 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -124,11 +125,19 @@ if compiler.compiler_type == "unix": compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"].split() + cflags = shlex.split(os.environ["CFLAGS"]) compiler.compiler.extend(cflags) compiler.compiler_so.extend(cflags) compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from .sysconfig_cpython import ( diff --git a/lib-python/3/test/test_memoryview.py b/lib-python/3/test/test_memoryview.py --- a/lib-python/3/test/test_memoryview.py +++ b/lib-python/3/test/test_memoryview.py @@ -50,6 +50,7 @@ m = None self.assertEqual(getrefcount(b), oldrefcount) + @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_getitem(self): for tp in self._types: self.check_getitem_with_type(tp) @@ -74,6 +75,7 @@ m = None self.assertEqual(getrefcount(b), oldrefcount) + @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_setitem_writable(self): if not self.rw_type: return @@ -126,6 +128,7 @@ with self.assertRaises(TypeError): del m[1:4] + @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_tobytes(self): for tp in 
self._types: m = self._view(tp(self._source)) @@ -142,6 +145,7 @@ l = m.tolist() self.assertEqual(l, list(b"abcdef")) + @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_compare(self): # memoryviews can compare for equality with other objects # having the buffer interface. @@ -189,6 +193,7 @@ m = self.check_attributes_with_type(self.ro_type) self.assertEqual(m.readonly, True) + @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_attributes_writable(self): if not self.rw_type: return diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. 
+ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -7,7 +7,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -50,11 +50,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -332,10 +327,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -343,10 +334,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. 
By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. 
- -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. 
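As a side illustration of the naive SSI-to-stack lowering described a few paragraphs above (explicitly loading every argument and storing the result), the scheme can be summed up in a few lines of Python; this is only a sketch, not code from the backend::

    def render_ssi_op(result, op, args):
        # naive lowering: LOAD each argument, emit the operation, STORE the
        # result; a later optimization pass can drop redundant LOAD/STORE pairs.
        lines = ['LOAD %s' % arg for arg in args]
        lines.append(op)
        lines.append('STORE %s' % result)
        return lines

    assert render_ssi_op('v2', 'int_add', ['v0', 'v1']) == [
        'LOAD v0', 'LOAD v1', 'int_add', 'STORE v2']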
- -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. 
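The on-the-fly exception translation described in the "Mapping exceptions" passage above can also be pictured in plain Python; this sketch is purely illustrative, with NativeOverflow standing in for System.OverflowException and native_add_ovf for the add.ovf instruction::

    class NativeOverflow(Exception):
        # stands in for the platform exception (System.OverflowException)
        pass

    def native_add_ovf(x, y):
        # stands in for the CLI 'add.ovf' instruction on int32
        r = x + y
        if not (-2**31 <= r < 2**31):
            raise NativeOverflow
        return r

    def int_add_ovf(x, y):
        # the translation: catch the platform exception and re-raise the
        # RPython-level exception the interpreter expects
        try:
            return native_add_ovf(x, y)
        except NativeOverflow:
            raise OverflowError("integer addition")

    assert int_add_ovf(1, 2) == 3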
- -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. 
- -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. 
- -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. - -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. 
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. 
_JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). 
- -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. 
In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. 
What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. 
- -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
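For comparison with the removed CLI/JVM snippet above, the corresponding
C-backend flow from the interactive translator shell looks roughly like
this (a minimal sketch, assuming the same ``Translation`` helper from
``rpython.translator.interactive`` that the removed example used;
``compile_c`` plays the role that ``compile_cli``/``compile_jvm`` played
above)::

    >>> from rpython.translator.interactive import Translation
    >>> def myfunc(a, b): return a + b
    ...
    >>> t = Translation(myfunc, [int, int])
    >>> t.annotate()        # infer types at the RPython level
    >>> t.rtype()           # lower to the C-oriented lltype system
    >>> f = t.compile_c()   # build a shared library and wrap it
    >>> f(4, 5)
    9
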
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. 
- Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. **This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. 
prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ From noreply at buildbot.pypy.org Tue Jul 30 16:21:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 16:21:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Refactor for cpyext PyNumber_CoerceEx Message-ID: <20130730142126.896361C0883@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65822:73c0b2494f1f Date: 2013-07-30 16:20 +0200 http://bitbucket.org/pypy/pypy/changeset/73c0b2494f1f/ Log: Refactor for cpyext PyNumber_CoerceEx diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -472,6 +472,14 @@ return space.wrap(1) def coerce(space, w_obj1, w_obj2): + w_res = space.try_coerce(w_obj1, w_obj2) + if w_res is None: + raise OperationError(space.w_TypeError, + space.wrap("coercion failed")) + return w_res + + def try_coerce(space, w_obj1, w_obj2): + """Returns a wrapped 2-tuple or a real None if it failed.""" w_typ1 = space.type(w_obj1) w_typ2 = space.type(w_obj2) w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, '__coerce__') @@ -488,8 +496,7 @@ if w_res is None or space.is_w(w_res, space.w_None): w_res = _invoke_binop(space, w_right_impl, w_obj2, w_obj1) if w_res is None or space.is_w(w_res, space.w_None): - raise OperationError(space.w_TypeError, - space.wrap("coercion failed")) + return None if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2): raise OperationError(space.w_TypeError, From noreply at buildbot.pypy.org Tue Jul 30 19:04:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 19:04:08 +0200 (CEST) Subject: [pypy-commit] cffi default: Crash with a clear error message in case of built-in but unsupported Message-ID: <20130730170408.CC8B51C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1299:bc462d3c35a6 Date: 2013-07-30 18:44 +0200 http://bitbucket.org/cffi/cffi/changeset/bc462d3c35a6/ Log: Crash with a clear error message in case of built-in but unsupported types. Add a test that fails so far, to fix. diff --git a/cffi/commontypes.py b/cffi/commontypes.py --- a/cffi/commontypes.py +++ b/cffi/commontypes.py @@ -30,7 +30,9 @@ elif result in model.PrimitiveType.ALL_PRIMITIVE_TYPES: result = model.PrimitiveType(result) else: - assert commontype != result + if commontype == result: + raise api.FFIError("Unsupported type: %r. Please file a bug " + "if you think it should be." 
% (commontype,)) result = resolve_common_type(result) # recursively assert isinstance(result, model.BaseTypeByIdentity) _CACHE[commontype] = result diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -198,6 +198,9 @@ typerepr = self.TypeRepr ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { short a, b, c; };") + p = ffi.cast("short unsigned int", 0) + assert repr(p) == "" + assert repr(ffi.typeof(p)) == typerepr % "unsigned short" p = ffi.cast("unsigned short int", 0) assert repr(p) == "" assert repr(ffi.typeof(p)) == typerepr % "unsigned short" From noreply at buildbot.pypy.org Tue Jul 30 19:04:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 19:04:09 +0200 (CEST) Subject: [pypy-commit] cffi default: Accept short, long, signed, unsigned as prefixes of a real type (usually Message-ID: <20130730170409.F080E1C0883@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1300:c268d05a14da Date: 2013-07-30 19:03 +0200 http://bitbucket.org/cffi/cffi/changeset/c268d05a14da/ Log: Accept short, long, signed, unsigned as prefixes of a real type (usually int) written in any order. diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -290,13 +290,26 @@ # assume a primitive type. get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) - if names == ['signed'] or names == ['unsigned']: - names.append('int') - if names[0] == 'signed' and names != ['signed', 'char']: - names.pop(0) - if (len(names) > 1 and names[-1] == 'int' - and names != ['unsigned', 'int']): - names.pop() + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type From noreply at buildbot.pypy.org Tue Jul 30 20:10:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 20:10:52 +0200 (CEST) Subject: [pypy-commit] pypy default: Argh. Turning "try: list[index]; except IndexError" into "try: Message-ID: <20130730181052.1DFF91C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65823:1e4a202133e5 Date: 2013-07-30 20:09 +0200 http://bitbucket.org/pypy/pypy/changeset/1e4a202133e5/ Log: Argh. Turning "try: list[index]; except IndexError" into "try: method(index); except IndexError" just doesn't work in RPython after translation. Grrr. 
diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -195,11 +195,10 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fieldstate) > value.getlength(): + raise BadVirtualState for i in range(len(self.fieldstate)): - try: - v = value.get_item_value(i) - except IndexError: - raise BadVirtualState + v = value.get_item_value(i) s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -269,13 +268,13 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fielddescrs) > len(value._items): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): try: v = value._items[i][self.fielddescrs[i][j]] - except IndexError: - raise BadVirtualState except KeyError: raise BadVirtualState s = self.fieldstate[p] From noreply at buildbot.pypy.org Tue Jul 30 20:43:12 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 20:43:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Change the magic number in .pyc files: we used default_magic = 0xf303+2, Message-ID: <20130730184312.C77F91C00D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65824:c3992b355ed7 Date: 2013-07-30 20:42 +0200 http://bitbucket.org/pypy/pypy/changeset/c3992b355ed7/ Log: Change the magic number in .pyc files: we used default_magic = 0xf303+2, now it is 0xf303+6. The most recent incompatible change that requires this change is e953dfbc7f0a. diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" Author: Armin Rigo Branch: release-2.1.x Changeset: r65825:43efd6376dab Date: 2013-07-30 20:42 +0200 http://bitbucket.org/pypy/pypy/changeset/43efd6376dab/ Log: Change the magic number in .pyc files: we used default_magic = 0xf303+2, now it is 0xf303+6. The most recent incompatible change that requires this change is e953dfbc7f0a. diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" Author: Manuel Jacob Branch: py3k-memoryview Changeset: r65826:d709ed43b270 Date: 2013-07-30 21:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d709ed43b270/ Log: Unskip memoryview tests. 
diff --git a/lib-python/3/test/test_memoryview.py b/lib-python/3/test/test_memoryview.py --- a/lib-python/3/test/test_memoryview.py +++ b/lib-python/3/test/test_memoryview.py @@ -50,7 +50,6 @@ m = None self.assertEqual(getrefcount(b), oldrefcount) - @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_getitem(self): for tp in self._types: self.check_getitem_with_type(tp) @@ -75,7 +74,6 @@ m = None self.assertEqual(getrefcount(b), oldrefcount) - @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_setitem_writable(self): if not self.rw_type: return @@ -128,7 +126,6 @@ with self.assertRaises(TypeError): del m[1:4] - @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_tobytes(self): for tp in self._types: m = self._view(tp(self._source)) @@ -145,7 +142,6 @@ l = m.tolist() self.assertEqual(l, list(b"abcdef")) - @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_compare(self): # memoryviews can compare for equality with other objects # having the buffer interface. @@ -193,7 +189,6 @@ m = self.check_attributes_with_type(self.ro_type) self.assertEqual(m.readonly, True) - @unittest.skip('XXX: https://bugs.pypy.org/issue1542') def test_attributes_writable(self): if not self.rw_type: return From noreply at buildbot.pypy.org Tue Jul 30 22:00:28 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 22:00:28 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: label as 2.1 like default's release was Message-ID: <20130730200028.DB12C1C01F6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65827:3a818ce5da13 Date: 2013-07-30 12:59 -0700 http://bitbucket.org/pypy/pypy/changeset/3a818ce5da13/ Log: label as 2.1 like default's release was diff --git a/pypy/doc/release-pypy3-2.1.0-beta1.rst b/pypy/doc/release-pypy3-2.1.0-beta1.rst --- a/pypy/doc/release-pypy3-2.1.0-beta1.rst +++ b/pypy/doc/release-pypy3-2.1.0-beta1.rst @@ -1,17 +1,17 @@ -================== -PyPy3 2.1.0 beta 1 -================== +================ +PyPy3 2.1 beta 1 +================ -We're pleased to announce the first beta of the upcoming 2.1.0 release of +We're pleased to announce the first beta of the upcoming 2.1 release of PyPy3. This is the first release of PyPy which targets Python 3 (3.2.3) compatibility. We would like to thank all of the people who donated_ to the `py3k proposal`_ for supporting the work that went into this and future releases. 
-You can download the PyPy3 2.1.0 beta 1 release here: +You can download the PyPy3 2.1 beta 1 release here: - http://pypy.org/download.html + http://pypy.org/download.html#pypy3-beta-1 Highlights ========== From noreply at buildbot.pypy.org Tue Jul 30 22:22:00 2013 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Tue, 30 Jul 2013 22:22:00 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: merge from default Message-ID: <20130730202200.CE7E61C00D8@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r65828:81d6a3023f58 Date: 2013-07-30 22:20 +0200 http://bitbucket.org/pypy/pypy/changeset/81d6a3023f58/ Log: merge from default diff too long, truncating to 2000 out of 100779 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -6,6 +6,7 @@ .idea .project .pydevproject +__pycache__ syntax: regexp ^testresult$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,6 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -2,9 +2,9 @@ ======= Except when otherwise stated (look for LICENSE files in directories or -information at the beginning of each file) all software and -documentation in the 'pypy', 'ctype_configure', 'dotviewer', 'demo', -and 'lib_pypy' directories is licensed as follows: +information at the beginning of each file) all software and documentation in +the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', and 'lib_pypy' +directories is licensed as follows: The MIT License @@ -38,176 +38,239 @@ Armin Rigo Maciej Fijalkowski Carl Friedrich Bolz + Antonio Cuni Amaury Forgeot d'Arc - Antonio Cuni Samuele Pedroni + Alex Gaynor Michael Hudson + David Schneider Holger Krekel - Alex Gaynor Christian Tismer Hakan Ardo Benjamin Peterson - David Schneider + Matti Picus + Philip Jenvey + Anders Chrigstrom + Brian Kearns Eric van Riet Paap - Anders Chrigstrom Richard Emslie + Alexander Schremmer + Wim Lavrijsen Dan Villiom Podlaski Christiansen - Alexander Schremmer + Manuel Jacob Lukas Diekmann + Sven Hager + Anders Lehmann Aurelien Campeas - Anders Lehmann + Niklaus Haldimann + Ronan Lamy Camillo Bruni - Niklaus Haldimann - Sven Hager + Laura Creighton + Toon Verwaest Leonardo Santagada - Toon Verwaest Seo Sanghyeon Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp Lawrence Oluyede Bartosz Skowron - Jakub Gustak - Guido Wesdorp Daniel Roberts - Laura Creighton + Niko Matsakis Adrien Di Mascio Ludovic Aubry - Niko Matsakis - Wim Lavrijsen - Matti Picus + Alexander Hesse + Jacob Hallen + Romain Guillebert Jason Creighton - Jacob Hallen Alex Martelli - Anders Hammarquist + Michal Bendowski Jan de Mooij + Michael Foord Stephan Diehl - Michael Foord Stefan Schwarzer + Valentino Volonghi Tomek Meka Patrick Maupin + stian Bob Ippolito Bruno Gola + Jean-Paul Calderone + Timo Paulssen Alexandre Fayolle + Simon Burton Marius Gedminas - Simon Burton - David Edelsohn - Jean-Paul Calderone John Witulski - Timo Paulssen - holger krekel + Greg Price Dario Bertini Mark Pearse + Simon Cross + Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov - Valentino Volonghi Paul deGrandis Ilya Osadchiy - Ronny Pfannschmidt Adrian Kuhn + Boris Feigin tav Georg Brandl - Philip Jenvey + Bert Freudenberg + Stian Andreassen + Stefano Rivera + Wanja Saatkamp Gerald Klix - Wanja Saatkamp - Boris Feigin + Mike Blume + Taavi Burns Oscar Nierstrasz David Malcolm Eugene Oden Henry Mason + Preston Timmons Jeff Terrace + David Ripton + Dusty Phillips Lukas Renggli Guenter Jantzen + Tobias Oberstein + Remi Meier Ned Batchelder - Bert Freudenberg Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin Michael Schneider Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey - Igor Trindade Oliveira Lucian Branescu Mihaila + Tim Felgentreff + Tyler Wade + Gabriel Lavoie Olivier Dormond Jared Grubb Karl Bartel - Gabriel Lavoie + Brian Dorsey Victor Stinner - Brian Dorsey Stuart Williams + Jasper Schulz Toby Watson Antoine Pitrou + Aaron Iles + Michael Cheng Justas Sadzevicius + Gasper Zejn Neil Shepperd Mikael Schönenberg - Gasper Zejn + Elmo Mäntynen + Tobias Pape Jonathan David Riehl - Elmo Mäntynen + Stanislaw Halik Anders Qvist + Chirag Jadwani Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre Alexander Sedov Corbin Simpson - Vincent Legoll - Romain Guillebert - Alan McIntyre - Alex Perry + Christopher Pope + Laurence Tratt + Guillebert Romain + Christian Tismer + Dan Stromberg + Stefano Parmesan + Christian Hudon + Alexis Daboville Jens-Uwe Mager - Simon Cross - Dan Stromberg - Guillebert Romain Carl Meyer + Karl Ramm Pieter Zieschang + Gabriel + Paweł Piotr Przeradowski + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Jacek Generowicz Alejandro J. Cura - Sylvain Thenault - Christoph Gerum + Jacob Oscarson Travis Francis Athougies + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann Henrik Vendelbo - Lutz Paelike - Jacob Oscarson - Martin Blais - Lucio Torre - Lene Wagner + Dan Buch Miguel de Val Borro Artur Lisiecki - Bruno Gola + Sergey Kishchenko Ignas Mikalajunas - Stefano Rivera + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + Andrews Medina + roberto at goyle + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle Joshua Gilbert - Godefroid Chappelle - Yusei Tahara + Dan Colish Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Floris Bruynooghe + Akira Li + Gustavo Niemeyer Stephan Busemann - Gustavo Niemeyer - William Leslie - Akira Li - Kristjan Valur Jonsson - Bobby Impollonia - Michael Hudson-Doyle - Laurence Tratt - Yasir Suhail - Andrew Thompson - Anders Sigfridsson - Floris Bruynooghe - Jacek Generowicz - Dan Colish - Zooko Wilcox-O Hearn - Dan Loewenherz + Anna Katrina Dominguez + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuhäuser + Buck Golemon + Konrad Delong + Dinu Gherman Chris Lambacher - Dinu Gherman - Brett Cannon - Daniel Neuhäuser - Michael Chermside - Konrad Delong - Anna Ravencroft - Greg Price - Armin Ronacher - Christian Muirhead + coolbutuseless at gmail.com Jim Baker Rodrigo Araújo - Romain Guillebert + Armin Ronacher + Brett Cannon + yrttyr + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + 
Anna Ravencroft + Andrew Chambers + Julien Phalip + Dan Loewenherz Heinrich-Heine University, Germany Open End AB (formerly AB Strakt), Sweden @@ -218,45 +281,22 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike License. -License for 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' -============================================================== +License for 'lib-python/2.7' +============================ -Except when otherwise stated (look for LICENSE files or -copyright/license information at the beginning of each file) the files -in the 'lib-python/2.7.0' and 'lib-python/2.7.0-modified' directories -are all copyrighted by the Python Software Foundation and licensed under -the Python Software License of which you can find a copy here: +Except when otherwise stated (look for LICENSE files or copyright/license +information at the beginning of each file) the files in the 'lib-python/2.7' +directory are all copyrighted by the Python Software Foundation and licensed +under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'pypy/translator/jvm/src/jna.jar' -============================================= - -The file 'pypy/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'pypy/translator/jvm/src/jasmin.jar' -================================================ - -The file 'pypy/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/command/install.py b/lib-python/2.7/distutils/command/install.py --- a/lib-python/2.7/distutils/command/install.py +++ b/lib-python/2.7/distutils/command/install.py @@ -474,8 +474,8 @@ def select_scheme (self, name): # it's the caller's problem if they supply a bad name! - if hasattr(sys, 'pypy_version_info') and not ( - name.endswith('_user') or name.endswith('_home')): + if (hasattr(sys, 'pypy_version_info') and + not name.endswith(('_user', '_home'))): name = 'pypy' scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. 
+import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,9 +1,18 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp +import shlex from distutils.errors import DistutilsPlatformError @@ -49,16 +58,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -71,7 +75,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars @@ -119,13 +123,21 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-fPIC', '-Wimplicit']) + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"] - compiler.compiler.append(cflags) - compiler.compiler_so.append(cflags) - compiler.linker_so.append(cflags) + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py --- a/lib-python/2.7/json/__init__.py +++ b/lib-python/2.7/json/__init__.py @@ -105,6 +105,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -241,7 +247,6 @@ _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) - def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing @@ -323,7 +328,10 @@ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + if _pypyjson and not isinstance(s, unicode): + return _pypyjson.loads(s) + else: + return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,21 +134,30 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +# NOTE(flaper87): This is different from +# python's stdlib module since pypy's +# dicts are much faster when their +# keys are all of the same type. +# Introduced in commit 9de7b40c586f +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', } +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, +} +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -164,7 +173,11 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + + # NOTE(flaper87): Check also in _nameToLevel + # if value is None. + return (_levelToName.get(level) or + _nameToLevel.get(level, ("Level %s" % level))) def addLevelName(level, levelName): """ @@ -174,8 +187,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
- _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +196,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +290,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/pydoc.py b/lib-python/2.7/pydoc.py --- a/lib-python/2.7/pydoc.py +++ b/lib-python/2.7/pydoc.py @@ -1953,7 +1953,11 @@ if key is None: callback(None, modname, '') else: - desc = split(__import__(modname).__doc__ or '', '\n')[0] + try: + module_doc = __import__(modname).__doc__ + except ImportError: + module_doc = None + desc = split(module_doc or '', '\n')[0] if find(lower(modname + ' - ' + desc), key) >= 0: callback(None, modname, desc) diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,11 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - self._sock._decref_socketios() + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) 
logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list @@ -275,6 +278,24 @@ def test_invalid_name(self): self.assertRaises(TypeError, logging.getLogger, any) + def test_get_level_name(self): + """Test getLevelName returns level constant.""" + # NOTE(flaper87): Bug #1517 + self.assertEqual(logging.getLevelName('NOTSET'), 0) + self.assertEqual(logging.getLevelName('DEBUG'), 10) + self.assertEqual(logging.getLevelName('INFO'), 20) + self.assertEqual(logging.getLevelName('WARN'), 30) + self.assertEqual(logging.getLevelName('WARNING'), 30) + self.assertEqual(logging.getLevelName('ERROR'), 40) + self.assertEqual(logging.getLevelName('CRITICAL'), 50) + + self.assertEqual(logging.getLevelName(0), 'NOTSET') + self.assertEqual(logging.getLevelName(10), 'DEBUG') + self.assertEqual(logging.getLevelName(20), 'INFO') + self.assertEqual(logging.getLevelName(30), 'WARNING') + self.assertEqual(logging.getLevelName(40), 'ERROR') + self.assertEqual(logging.getLevelName(50), 'CRITICAL') + class BasicFilterTest(BaseTest): """Test the bundled Filter class.""" diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def 
test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -130,7 +130,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cd.py'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), @@ -177,7 +177,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -20,7 +20,7 @@ or tp._type_ not in "iIhHbBlLqQ"): #XXX: are those all types? # we just dont get the type name - # in the interp levle thrown TypeError + # in the interp level thrown TypeError # from rawffi if there are more raise TypeError('bit fields not allowed for type ' + tp.__name__) @@ -166,9 +166,7 @@ if self is StructOrUnion: return if '_fields_' not in self.__dict__: - self._fields_ = [] - self._names = [] - _set_shape(self, [], self._is_union) + self._fields_ = [] # As a side-effet, this also sets the ffishape. 
__setattr__ = struct_setattr diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) 
lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? +ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. 
+ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,9 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,57 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = 
['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,48 @@ +# _tkinter package -- low-level interface to libtk and libtcl. +# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. + +class TclError(Exception): + pass + +import cffi +try: + from .tklib import tklib, tkffi +except cffi.VerificationError: + raise ImportError("Tk headers and development libraries are required") + +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . 
import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): 
+ # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. + raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != 
tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. + raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py @@ -0,0 +1,114 @@ +# TclObject, conversions with Python objects + +from .tklib import tklib, tkffi + +class TypeCache(object): + def __init__(self): + self.BooleanType = tklib.Tcl_GetObjType("boolean") + self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") + self.DoubleType = tklib.Tcl_GetObjType("double") + self.IntType = tklib.Tcl_GetObjType("int") + self.ListType = tklib.Tcl_GetObjType("list") + self.ProcBodyType = tklib.Tcl_GetObjType("procbody") + self.StringType = tklib.Tcl_GetObjType("string") + + +def FromObj(app, value): + """Convert a TclObj pointer into a Python object.""" + typeCache = app._typeCache + if not value.typePtr: + buf = tkffi.buffer(value.bytes, value.length) + result = buf[:] + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. 
+ try: + result.decode('ascii') + except UnicodeDecodeError: + result = result.decode('utf8') + return result From noreply at buildbot.pypy.org Tue Jul 30 22:22:15 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 30 Jul 2013 22:22:15 +0200 (CEST) Subject: [pypy-commit] pypy pypy3-release-2.1.x: merp Message-ID: <20130730202215.435C91C00D8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: pypy3-release-2.1.x Changeset: r65829:3af81ced412a Date: 2013-07-30 13:21 -0700 http://bitbucket.org/pypy/pypy/changeset/3af81ced412a/ Log: merp diff --git a/pypy/doc/release-pypy3-2.1.0-beta1.rst b/pypy/doc/release-pypy3-2.1.0-beta1.rst --- a/pypy/doc/release-pypy3-2.1.0-beta1.rst +++ b/pypy/doc/release-pypy3-2.1.0-beta1.rst @@ -11,7 +11,7 @@ You can download the PyPy3 2.1 beta 1 release here: - http://pypy.org/download.html#pypy3-beta-1 + http://pypy.org/download.html#pypy3-2-1-beta-1 Highlights ========== From noreply at buildbot.pypy.org Tue Jul 30 23:00:52 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 30 Jul 2013 23:00:52 +0200 (CEST) Subject: [pypy-commit] pypy default: add version_info to numpypy status page Message-ID: <20130730210052.BE4231C01F6@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65830:6bec82bea284 Date: 2013-07-30 23:59 +0300 http://bitbucket.org/pypy/pypy/changeset/6bec82bea284/ Log: add version_info to numpypy status page diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -78,6 +78,11 @@ items.add(Item(name, kind, subitems)) return items +def get_version_str(python): + args = [python, '-c', 'import sys; print sys.version'] + lines = subprocess.check_output(args).splitlines() + return lines[0] + def split(lst): SPLIT = 5 lgt = len(lst) // SPLIT + 1 @@ -93,6 +98,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") pypy_items = find_numpy_items(argv[1], "numpypy") + ver = get_version_str(argv[1]) all_items = [] msg = "{:d}/{:d} names".format(len(pypy_items), len(cpy_items)) + " " @@ -113,7 +119,8 @@ env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)) ) - html = env.get_template("page.html").render(all_items=split(sorted(all_items)), msg=msg) + html = env.get_template("page.html").render(all_items=split(sorted(all_items)), + msg=msg, ver=ver) if len(argv) > 2: with open(argv[2], 'w') as f: f.write(html.encode("utf-8")) diff --git a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -34,6 +34,7 @@

NumPyPy Status

+Version: {{ ver }}
Overall: {{ msg }}
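
A minimal, self-contained sketch of the version probe that this changeset adds to numready/main.py; the function body mirrors the diff above, while the interpreter path in the demo call is only a stand-in for the pypy-c binary normally passed to the script, not part of the commit:

    import subprocess

    def get_version_str(python):
        # Ask the target interpreter for sys.version and keep only the
        # first line, which is what the status page shows as "Version".
        args = [python, '-c', 'import sys; print sys.version']
        lines = subprocess.check_output(args).splitlines()
        return lines[0]

    if __name__ == '__main__':
        # Hypothetical invocation: the numready script receives the
        # interpreter to probe as argv[1]; /usr/bin/python is a stand-in.
        print get_version_str('/usr/bin/python')
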

From noreply at buildbot.pypy.org Tue Jul 30 23:06:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 30 Jul 2013 23:06:42 +0200 (CEST) Subject: [pypy-commit] pypy release-2.1.x: Argh. Turning "try: list[index]; except IndexError" into "try: Message-ID: <20130730210642.460741C01F6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-2.1.x Changeset: r65831:a5db735cb628 Date: 2013-07-30 20:09 +0200 http://bitbucket.org/pypy/pypy/changeset/a5db735cb628/ Log: Argh. Turning "try: list[index]; except IndexError" into "try: method(index); except IndexError" just doesn't work in RPython after translation. Grrr. diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -195,11 +195,10 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fieldstate) > value.getlength(): + raise BadVirtualState for i in range(len(self.fieldstate)): - try: - v = value.get_item_value(i) - except IndexError: - raise BadVirtualState + v = value.get_item_value(i) s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -269,13 +268,13 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fielddescrs) > len(value._items): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): try: v = value._items[i][self.fielddescrs[i][j]] - except IndexError: - raise BadVirtualState except KeyError: raise BadVirtualState s = self.fieldstate[p] From noreply at buildbot.pypy.org Tue Jul 30 23:40:52 2013 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 30 Jul 2013 23:40:52 +0200 (CEST) Subject: [pypy-commit] pypy default: test fix issue 1543 - add conjugate() to scalars Message-ID: <20130730214052.B12FF1C00D8@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r65832:a50d8cdd457e Date: 2013-07-31 00:38 +0300 http://bitbucket.org/pypy/pypy/changeset/a50d8cdd457e/ Log: test fix issue 1543 - add conjugate() to scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -205,6 +205,7 @@ descr_neg = _unaryop_impl("negative") descr_abs = _unaryop_impl("absolute") descr_invert = _unaryop_impl("invert") + descr_conjugate = _unaryop_impl("conjugate") def descr_divmod(self, space, w_other): w_quotient = self.descr_div(space, w_other) @@ -517,6 +518,7 @@ all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), round = interp2app(W_GenericBox.descr_round), + conjugate = interp2app(W_GenericBox.descr_conjugate), view = interp2app(W_GenericBox.descr_view), ) diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -400,6 +400,7 @@ assert conj is conjugate assert conj(c0) == c0 + assert c0.conjugate() == c0 assert conj(c1) == complex(1, -2) assert conj(1) == 1 assert conj(-3) == -3 @@ -625,6 +626,8 @@ a = array([1 + 2j, 1 - 2j]) assert (a.conj() == [1 - 2j, 1 + 2j]).all() + a = array([1,2,3.4J],dtype=complex) + assert a[2].conjugate() == 0-3.4j def test_math(self): if self.isWindows: From noreply at buildbot.pypy.org Tue Jul 30 23:54:39 2013 From: noreply at 
buildbot.pypy.org (cfbolz) Date: Tue, 30 Jul 2013 23:54:39 +0200 (CEST) Subject: [pypy-commit] pypy default: don't make every strategy intstance have a sizehint field, which needs to be Message-ID: <20130730215439.990801C00D8@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65833:33309a9fd9d0 Date: 2013-07-30 20:55 +0200 http://bitbucket.org/pypy/pypy/changeset/33309a9fd9d0/ Log: don't make every strategy intstance have a sizehint field, which needs to be checked whether it's -1. This saves one guard per normal list creation. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -702,11 +702,13 @@ find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): - sizehint = -1 def __init__(self, space): self.space = space + def get_sizehint(self): + return -1 + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ -894,7 +896,7 @@ else: strategy = self.space.fromcache(ObjectListStrategy) - storage = strategy.get_empty_storage(self.sizehint) + storage = strategy.get_empty_storage(self.get_sizehint()) w_list.strategy = strategy w_list.lstorage = storage @@ -974,6 +976,9 @@ self.sizehint = sizehint ListStrategy.__init__(self, space) + def get_sizehint(self): + return self.sizehint + def _resize_hint(self, w_list, hint): assert hint >= 0 self.sizehint = hint From noreply at buildbot.pypy.org Tue Jul 30 23:54:47 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 30 Jul 2013 23:54:47 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130730215447.52F361C00D8@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65834:14525d450338 Date: 2013-07-30 23:53 +0200 http://bitbucket.org/pypy/pypy/changeset/14525d450338/ Log: merge diff too long, truncating to 2000 out of 53271 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -297,30 +297,6 @@ under the Python Software License of which you can find a copy here: http://www.python.org/doc/Copyright.html -License for 'rpython/translator/jvm/src/jna.jar' -============================================= - -The file 'rpyhton/translator/jvm/src/jna.jar' is licensed under the GNU -Lesser General Public License of which you can find a copy here: -http://www.gnu.org/licenses/lgpl.html - -License for 'rpython/translator/jvm/src/jasmin.jar' -================================================ - -The file 'rpyhton/translator/jvm/src/jasmin.jar' is copyright (c) 1996-2004 Jon Meyer -and distributed with permission. The use of Jasmin by PyPy does not imply -that PyPy is endorsed by Jon Meyer nor any of Jasmin's contributors. Furthermore, -the following disclaimer applies to Jasmin: - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - License for 'pypy/module/unicodedata/' ====================================== diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,6 +12,7 @@ import sys import os +import shlex from distutils.errors import DistutilsPlatformError @@ -65,11 +66,6 @@ g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check - g['OPT'] = "" - g['CFLAGS'] = "" - g['CPPFLAGS'] = "" - g['CCSHARED'] = '-shared -O2 -fPIC -Wimplicit' - g['LDSHARED'] = g['CC'] + ' -shared' global _config_vars _config_vars = g @@ -127,34 +123,21 @@ optional C speedup components. """ if compiler.compiler_type == "unix": - cc, opt, cflags, ccshared, ldshared = get_config_vars( - 'CC', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED') - + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') - - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - - cc_cmd = cc + ' ' + cflags - - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - linker_so=ldshared) + if "CPPFLAGS" in os.environ: + cppflags = shlex.split(os.environ["CPPFLAGS"]) + compiler.compiler.extend(cppflags) + compiler.compiler_so.extend(cppflags) + compiler.linker_so.extend(cppflags) + if "CFLAGS" in os.environ: + cflags = shlex.split(os.environ["CFLAGS"]) + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) + if "LDFLAGS" in os.environ: + ldflags = shlex.split(os.environ["LDFLAGS"]) + compiler.linker_so.extend(ldflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -275,7 +275,7 @@ try: result.f_bfree = 1 self.fail("No exception thrown") - except TypeError: + except (TypeError, AttributeError): pass try: diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -165,6 +165,63 @@ Nyy rkprcgoybpxf frrz fnar. N cvax tyvggrel ebgngvat ynzoqn "vg'f yvxryl grzcbenel hagvy sberire" nevtb +"Lbh xabj jung'f avpr nobhg EClguba? Ybatre fjbeq svtugf." +nccneragyl pbashfvba vf n srngher +nccneragyl pbashfvba vf n srngher... be vf vg? 
+ClCl 1.7 eryrnfrq +vs lbh jnag gb or penml, lbh qba'g unir gb sbepr vg +vs lbh jnag vg gb or iveghny, lbh fubhyq abg sbepr vg + svwny: V whfg... fgnegrq pbqvat naq fhqqragyl... nobzvangvba +fabj, fabj! :-) +fabj, fabj, fabj, fabj +clcl 1.8 eryrnfrq +vg jnf srj lnxf gbb yngr +ClCl vf n enpr orgjrra hf funivat lnxf, naq gur havirefr gelvat gb cebqhpr zber naq zber lnxf +Jevgvat na SC7 nabalzbhf cebcbfny sbe ClCl vf yvxr znxvat n gi nq sbe n uvtu cresbeznapr fcbegf pne jvgubhg orvat noyr gb zragvba vgf zbqry be znahsnpghere +Fabj, fabj (ntnva) +Fgvyy fabjvat +thrff jung, fabj +"lbh haqrerfgvzngr gur vzcbegnapr bs zvpebnepuvgrpgher" "ab, vg'f zber gung jr ner fgvyy unccvyl va gur ynaq bs ernpunoyr sehvg" +Jub nz V? Naq vs lrf, ubj znal? +ClCl vf nyjnlf n cynfzn +"genafyngvba gbbypunva" = "EClgure"? gb jevgr vagrEClguref va +"sberire" va clcl grezf zrnaf yvxr srj zbaguf :) +"Onu. PClguba bofphevgl. - Nezva Evtb" +svwny: pna V vavgvngr lbh ba gur (rnfl ohg) aba-gevivny gbcvp bs jevgvat P shapgvba glcrf? :-) +nyy fbsgjner vzcebirzragf unccra ol n ovg +gur genprf qba'g yvr +:-) be engure ":-/" bss-ol-bar-xrl reebe +Be Nezva Evtb. V guvax ur'f noyr gb haqrefgnaq k86 gur jnl Plcure pna frr jbzra va gur zngevk. +V zvtug, ohg abobql erfcrpgf zr +cerohvyg vafgnapr Ryyvcfvf unf ab nggevohgr 'reeab' +guvf frnfba'f svefg "fabj! fabj!" vf urer +ClCl 2.0 orgn1 eryrnfrq - orggre yngr guna arire +Fjvgreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq +Fjvgmreynaq 2012: zber fabj va Qrprzore guna rire fvapr clcl fgnegrq + n sngny reebe, ol qrsvavgvba, vf sngny + V'z tynq gung jr cebtenz va Clguba naq jr qba'g qrny jvgu gubfr vffhrf rirel qnl. Ncneg gur snpg gung jr unir gb qrny jvgu gurz naljnl, vg frrzf +unccl arj lrne! +"zrffl" vf abg n whqtrzrag, ohg whfg n snpg bs pbzcyvpngrqarff +n ybg bs fabj +qb lbh xabj nobhg n gbnfgre jvgu 8XO bs ENZ naq 64XO bs EBZ? +vg'f orra fabjvat rirelqnl sbe n juvyr, V pna'g whfg chg "fabj, fabj" hc urer rirel qnl +fabjonyy svtugf! +sbejneq pbzcngvovyvgl jvgu bcgvzvmngvbaf gung unira'g orra vairagrq lrg +jr fgvyy unir gb jevgr fbsgjner jvgu n zrgnfcnpr ohooyr va vg +cebonoyl gur ynfg gvzr va gur frnfba, ohg: fabj, fabj! +ClCl 2.0-orgn2 eryrnfrq +Gur ceboyrz vf gung sbe nyzbfg nal aba-gevivny cebtenz, vg'f abg pyrne jung 'pbeerpg' zrnaf. +ClCl 2.0 nyzbfg eryrnfrq +ClCl 2.0 eryrnfrq +WVG pbzcvyref fubhyq or jevggra ol crbcyr jub npghnyyl unir snvgu va WVG pbzcvyref' novyvgl gb znxrf guvatf tb fpernzvat snfg +ClCl 2.0.1 eryrnfrq +arire haqrerfgvzngr gur vzcebonoyr jura lbh qb fbzrguvat ng 2TUm +ClCl 2.0.2 eryrnfrq +nyy jr arrq vf n angvir Cebybt znpuvar +V haqrefgnaq ubj qravnyvfz vf n onq qrohttvat grpuavdhr +rirel IZ fubhyq pbzr jvgu arheny argjbex genvarq gb erpbtavmr zvpeborapuznexf naq enaqbzyl syhpghngr gurz +/-9000% +lbh qvq abg nccebnpu clcl sebz gur rnfl raq: fgz, gura wvg. vg'f n ovg gur Abegu snpr +va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat """ from string import ascii_uppercase, ascii_lowercase diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,3 +1,4 @@ +import sys import _continuation __version__ = "0.4.0" @@ -5,7 +6,7 @@ # ____________________________________________________________ # Exceptions -class GreenletExit(Exception): +class GreenletExit(BaseException): """This special exception does not propagate to the parent greenlet; it can be used to kill a single greenlet.""" @@ -75,6 +76,15 @@ # up the 'parent' explicitly. 
Good enough, because a Ctrl-C # will show that the program is caught in this loop here.) target = target.parent + # convert a "raise GreenletExit" into "return GreenletExit" + if methodname == 'throw': + try: + raise baseargs[0], baseargs[1] + except GreenletExit, e: + methodname = 'switch' + baseargs = (((e,), {}),) + except: + baseargs = sys.exc_info()[:2] + baseargs[2:] # try: unbound_method = getattr(_continulet, methodname) @@ -147,5 +157,8 @@ _tls.current = greenlet try: raise exc, value, tb + except GreenletExit, e: + res = e finally: _continuation.permute(greenlet, greenlet.parent) + return ((res,), None) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -48,11 +48,6 @@ "termios", "_minimal_curses", ])) -working_oo_modules = default_modules.copy() -working_oo_modules.update(dict.fromkeys( - ["_md5", "_sha", "cStringIO", "itertools"] -)) - # XXX this should move somewhere else, maybe to platform ("is this posixish" # check or something) if sys.platform == "win32": @@ -340,10 +335,6 @@ if not IS_64_BITS: config.objspace.std.suggest(withsmalllong=True) - # some optimizations have different effects depending on the typesystem - if type_system == 'ootype': - config.objspace.std.suggest(multimethods="doubledispatch") - # extra optimizations with the JIT if level == 'jit': config.objspace.std.suggest(withcelldict=True) @@ -351,10 +342,7 @@ def enable_allworkingmodules(config): - if config.translation.type_system == 'ootype': - modules = working_oo_modules - else: - modules = working_modules + modules = working_modules if config.translation.sandbox: modules = default_modules # ignore names from 'essential_modules', notably 'exceptions', which diff --git a/pypy/doc/cli-backend.rst b/pypy/doc/cli-backend.rst deleted file mode 100644 --- a/pypy/doc/cli-backend.rst +++ /dev/null @@ -1,455 +0,0 @@ -=============== -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -=============================== - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. - -In reality the first point is not an advantage in the PyPy context, -because the `flow graph`_ we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). 
- -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - - -Handling platform differences -============================= - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. - -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine -================================= - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types ------------------------ - -The `rtyper`_ give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. 
There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - -Mapping built-in types ----------------------- - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions --------------------- - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. 
- -The code that implements the mapping is in the modules opcodes.py. - -Mapping exceptions ------------------- - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs ------------------------ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. - -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. 
- - -Translating classes -------------------- - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - -The Runtime Environment ------------------------ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI -============== - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. - -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - -Installing Python for .NET on Linux -=================================== - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. 
- -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - - -.. _`Standard Ecma 335`: http://www.ecma-international.org/publications/standards/Ecma-335.htm -.. _`flow graph`: translation.html#the-flow-model -.. _`rtyper`: rtyper.html -.. _`Python for .NET`: http://pythonnet.sourceforge.net/ diff --git a/pypy/doc/clr-module.rst b/pypy/doc/clr-module.rst deleted file mode 100644 --- a/pypy/doc/clr-module.rst +++ /dev/null @@ -1,143 +0,0 @@ -=============================== -The ``clr`` module for PyPy.NET -=============================== - -PyPy.NET give you access to the surrounding .NET environment via the -``clr`` module. This module is still experimental: some features are -still missing and its interface might change in next versions, but -it's still useful to experiment a bit with PyPy.NET. - -PyPy.NET provides an import hook that lets you to import .NET namespaces -seamlessly as they were normal Python modules. Then, - -PyPY.NET native classes try to behave as much as possible in the -"expected" way both for the developers used to .NET and for the ones -used to Python. - -In particular, the following features are mapped one to one because -they exist in both worlds: - - - .NET constructors are mapped to the Python __init__ method; - - - .NET instance methods are mapped to Python methods; - - - .NET static methods are mapped to Python static methods (belonging - to the class); - - - .NET properties are mapped to property-like Python objects (very - similar to the Python ``property`` built-in); - - - .NET indexers are mapped to Python __getitem__ and __setitem__; - - - .NET enumerators are mapped to Python iterators. - -Moreover, all the usual Python features such as bound and unbound -methods are available as well. - -Example of usage -================ - -Here is an example of interactive session using the ``clr`` module:: - - >>>> from System.Collections import ArrayList - >>>> obj = ArrayList() - >>>> obj.Add(1) - 0 - >>>> obj.Add(2) - 1 - >>>> obj.Add("foo") - 2 - >>>> print obj[0], obj[1], obj[2] - 1 2 foo - >>>> print obj.Count - 3 - -Conversion of parameters -======================== - -When calling a .NET method Python objects are converted to .NET -objects. Lots of effort have been taken to make the conversion as -much transparent as possible; in particular, all the primitive types -such as int, float and string are converted to the corresponding .NET -types (e.g., ``System.Int32``, ``System.Float64`` and -``System.String``). - -Python objects without a corresponding .NET types (e.g., instances of -user classes) are passed as "black boxes", for example to be stored in -some sort of collection. - -The opposite .NET to Python conversions happens for the values returned -by the methods. Again, primitive types are converted in a -straightforward way; non-primitive types are wrapped in a Python object, -so that they can be treated as usual. 
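The conversion rule just described can be summarised with a small sketch, written in plain Python only for illustration; it is not the marshalling code actually used by PyPy.NET, and note that the .NET double-precision type is spelled ``System.Double``::

    class DotNetBox(object):
        "Stand-in for an opaque .NET object reference (hypothetical)."
        def __init__(self, obj):
            self.obj = obj

    def to_net(value):
        # bool must be tested before int, since bool is a subclass of int
        if isinstance(value, bool):
            return ('System.Boolean', value)
        if isinstance(value, int):
            return ('System.Int32', value)
        if isinstance(value, float):
            return ('System.Double', value)
        if isinstance(value, str):
            return ('System.String', value)
        # instances of user classes are passed through as "black boxes"
        return ('System.Object', DotNetBox(value))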
- -Overload resolution -=================== - -When calling an overloaded method, PyPy.NET tries to find the best -overload for the given arguments; for example, consider the -``System.Math.Abs`` method:: - - - >>>> from System import Math - >>>> Math.Abs(-42) - 42 - >>>> Math.Abs(-42.0) - 42.0 - -``System.Math.Abs`` has got overloadings both for integers and floats: -in the first case we call the method ``System.Math.Abs(int32)``, while -in the second one we call the method ``System.Math.Abs(float64)``. - -If the system can't find a best overload for the given parameters, a -TypeError exception is raised. - - -Generic classes -================ - -Generic classes are fully supported. To instantiate a generic class, you need -to use the ``[]`` notation:: - - >>>> from System.Collections.Generic import List - >>>> mylist = List[int]() - >>>> mylist.Add(42) - >>>> mylist.Add(43) - >>>> mylist.Add("foo") - Traceback (most recent call last): - File "", line 1, in - TypeError: No overloads for Add could match - >>>> mylist[0] - 42 - >>>> for item in mylist: print item - 42 - 43 - - -External assemblies and Windows Forms -===================================== - -By default, you can only import .NET namespaces that belongs to already loaded -assemblies. To load additional .NET assemblies, you can use -``clr.AddReferenceByPartialName``. The following example loads -``System.Windows.Forms`` and ``System.Drawing`` to display a simple Windows -Form displaying the usual "Hello World" message:: - - >>>> import clr - >>>> clr.AddReferenceByPartialName("System.Windows.Forms") - >>>> clr.AddReferenceByPartialName("System.Drawing") - >>>> from System.Windows.Forms import Application, Form, Label - >>>> from System.Drawing import Point - >>>> - >>>> frm = Form() - >>>> frm.Text = "The first pypy-cli Windows Forms app ever" - >>>> lbl = Label() - >>>> lbl.Text = "Hello World!" - >>>> lbl.AutoSize = True - >>>> lbl.Location = Point(100, 100) - >>>> frm.Controls.Add(lbl) - >>>> Application.Run(frm) - -Unfortunately at the moment you can't do much more than this with Windows -Forms, because we still miss support for delegates and so it's not possible -to handle events. diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -626,7 +626,7 @@ Here is the order in which PyPy looks up Python modules: -*pypy/modules* +*pypy/module* mixed interpreter/app-level builtin modules, such as the ``sys`` and ``__builtin__`` module. @@ -657,7 +657,7 @@ on some classes being old-style. We just maintain those changes in place, -to see what is changed we have a branch called `vendot/stdlib` +to see what is changed we have a branch called `vendor/stdlib` wich contains the unmodified cpython stdlib .. _`mixed module mechanism`: diff --git a/pypy/doc/config/objspace.usemodules.clr.txt b/pypy/doc/config/objspace.usemodules.clr.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.usemodules.clr.txt +++ /dev/null @@ -1,1 +0,0 @@ -Use the 'clr' module. diff --git a/pypy/doc/config/translation.cli.trace_calls.txt b/pypy/doc/config/translation.cli.trace_calls.txt deleted file mode 100644 --- a/pypy/doc/config/translation.cli.trace_calls.txt +++ /dev/null @@ -1,3 +0,0 @@ -Internal. Debugging aid for the CLI backend. - -.. 
internal diff --git a/pypy/doc/config/translation.ootype.mangle.txt b/pypy/doc/config/translation.ootype.mangle.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.mangle.txt +++ /dev/null @@ -1,3 +0,0 @@ -Mangle the names of user defined attributes of the classes, in order -to ensure that every name is unique. Default is true, and it should -not be turned off unless you know what you are doing. diff --git a/pypy/doc/config/translation.ootype.txt b/pypy/doc/config/translation.ootype.txt deleted file mode 100644 --- a/pypy/doc/config/translation.ootype.txt +++ /dev/null @@ -1,1 +0,0 @@ -This group contains options specific for ootypesystem. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -45,7 +45,6 @@ binascii bz2 cStringIO - clr cmath `cpyext`_ crypt diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst --- a/pypy/doc/dir-reference.rst +++ b/pypy/doc/dir-reference.rst @@ -1,8 +1,8 @@ -PyPy directory cross-reference +PyPy directory cross-reference ------------------------------ -Here is a fully referenced alphabetical two-level deep -directory overview of PyPy: +Here is a fully referenced alphabetical two-level deep +directory overview of PyPy: ================================= ============================================ Directory explanation/links @@ -24,7 +24,7 @@ ``doc/*/`` other specific documentation topics or tools `pypy/interpreter/`_ `bytecode interpreter`_ and related objects - (frames, functions, modules,...) + (frames, functions, modules,...) `pypy/interpreter/pyparser/`_ interpreter-level Python source parser @@ -32,7 +32,7 @@ via an AST representation `pypy/module/`_ contains `mixed modules`_ - implementing core modules with + implementing core modules with both application and interpreter level code. Not all are finished and working. 
Use the ``--withmod-xxx`` @@ -45,7 +45,7 @@ objects and types `pypy/tool/`_ various utilities and hacks used - from various places + from various places `pypy/tool/algo/`_ general-purpose algorithmic and mathematic tools @@ -54,7 +54,7 @@ `rpython/annotator/`_ `type inferencing code`_ for - `RPython`_ programs + `RPython`_ programs `rpython/config/`_ handles the numerous options for RPython @@ -65,65 +65,56 @@ `rpython/rlib/`_ a `"standard library"`_ for RPython_ programs -`rpython/rtyper/`_ the `RPython Typer`_ +`rpython/rtyper/`_ the `RPython Typer`_ `rpython/rtyper/lltypesystem/`_ the `low-level type system`_ for C-like backends -`rpython/rtyper/ootypesystem/`_ the `object-oriented type system`_ - for OO backends - `rpython/memory/`_ the `garbage collector`_ construction framework `rpython/translator/`_ translation_ backends and support code -`rpython/translator/backendopt/`_ general optimizations that run before a +`rpython/translator/backendopt/`_ general optimizations that run before a backend generates code `rpython/translator/c/`_ the `GenC backend`_, producing C code from an RPython program (generally via the rtyper_) -`rpython/translator/cli/`_ the `CLI backend`_ for `.NET`_ - (Microsoft CLR or Mono_) - `pypy/goal/`_ our `main PyPy-translation scripts`_ live here -`rpython/translator/jvm/`_ the Java backend - `rpython/translator/tool/`_ helper tools for translation `dotviewer/`_ `graph viewer`_ ``*/test/`` many directories have a test subdirectory - containing test - modules (see `Testing in PyPy`_) + containing test + modules (see `Testing in PyPy`_) ``_cache/`` holds cache files from various purposes ================================= ============================================ .. _`bytecode interpreter`: interpreter.html -.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy -.. _`mixed modules`: coding-guide.html#mixed-modules -.. _`modules`: coding-guide.html#modules +.. _`Testing in PyPy`: coding-guide.html#testing-in-pypy +.. _`mixed modules`: coding-guide.html#mixed-modules +.. _`modules`: coding-guide.html#modules .. _`basil`: http://people.cs.uchicago.edu/~jriehl/BasilTalk.pdf .. _`object space`: objspace.html -.. _FlowObjSpace: objspace.html#the-flow-object-space +.. _FlowObjSpace: objspace.html#the-flow-object-space .. _`transparent proxies`: objspace-proxies.html#tproxy .. _`Differences between PyPy and CPython`: cpython_differences.html .. _`What PyPy can do for your objects`: objspace-proxies.html .. _`Continulets and greenlets`: stackless.html -.. _StdObjSpace: objspace.html#the-standard-object-space +.. _StdObjSpace: objspace.html#the-standard-object-space .. _`abstract interpretation`: http://en.wikipedia.org/wiki/Abstract_interpretation -.. _`rpython`: coding-guide.html#rpython -.. _`type inferencing code`: translation.html#the-annotation-pass -.. _`RPython Typer`: translation.html#rpython-typer +.. _`rpython`: coding-guide.html#rpython +.. _`type inferencing code`: translation.html#the-annotation-pass +.. _`RPython Typer`: translation.html#rpython-typer .. _`testing methods`: coding-guide.html#testing-in-pypy -.. _`translation`: translation.html -.. _`GenC backend`: translation.html#genc -.. _`CLI backend`: cli-backend.html +.. _`translation`: translation.html +.. _`GenC backend`: translation.html#genc .. _`py.py`: getting-started-python.html#the-py.py-interpreter .. _`translatorshell.py`: getting-started-dev.html#try-out-the-translator .. 
_JIT: jit/index.html diff --git a/pypy/doc/discussion/VM-integration.rst b/pypy/doc/discussion/VM-integration.rst deleted file mode 100644 --- a/pypy/doc/discussion/VM-integration.rst +++ /dev/null @@ -1,263 +0,0 @@ -============================================== -Integration of PyPy with host Virtual Machines -============================================== - -This document is based on the discussion I had with Samuele during the -Duesseldorf sprint. It's not much more than random thoughts -- to be -reviewed! - -Terminology disclaimer: both PyPy and .NET have the concept of -"wrapped" or "boxed" objects. To avoid confusion I will use "wrapping" -on the PyPy side and "boxing" on the .NET side. - -General idea -============ - -The goal is to find a way to efficiently integrate the PyPy -interpreter with the hosting environment such as .NET. What we would -like to do includes but it's not limited to: - - - calling .NET methods and instantiate .NET classes from Python - - - subclass a .NET class from Python - - - handle native .NET objects as transparently as possible - - - automatically apply obvious Python <--> .NET conversions when - crossing the borders (e.g. integers, string, etc.) - -One possible solution is the "proxy" approach, in which we manually -(un)wrap/(un)box all the objects when they cross the border. - -Example -------- - - :: - - public static int foo(int x) { return x} - - >>>> from somewhere import foo - >>>> print foo(42) - -In this case we need to take the intval field of W_IntObject, box it -to .NET System.Int32, call foo using reflection, then unbox the return -value and reconstruct a new (or reuse an existing one) W_IntObject. - -The other approach ------------------- - -The general idea to solve handle this problem is to split the -"stateful" and "behavioral" parts of wrapped objects, and use already -boxed values for storing the state. - -This way when we cross the Python --> .NET border we can just throw -away the behavioral part; when crossing .NET --> Python we have to -find the correct behavioral part for that kind of boxed object and -reconstruct the pair. - - -Split state and behaviour in the flowgraphs -=========================================== - -The idea is to write a graph transformation that takes an usual -ootyped flowgraph and split the classes and objects we want into a -stateful part and a behavioral part. - -We need to introduce the new ootypesystem type ``Pair``: it acts like -a Record but it hasn't its own identity: the id of the Pair is the id -of its first member. - - XXX about ``Pair``: I'm not sure this is totally right. It means - that an object can change identity simply by changing the value of a - field??? Maybe we could add the constraint that the "id" field - can't be modified after initialization (but it's not easy to - enforce). - - XXX-2 about ``Pair``: how to implement it in the backends? One - possibility is to use "struct-like" types if available (as in - .NET). But in this case it's hard to implement methods/functions - that modify the state of the object (such as __init__, usually). The - other possibility is to use a reference type (i.e., a class), but in - this case there will be a gap between the RPython identity (in which - two Pairs with the same state are indistinguishable) and the .NET - identity (in which the two objects will have a different identity, - of course). 
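Before going through the steps, here is a tiny plain-Python illustration of the intended split, only to make the idea concrete: ``Pair`` as sketched here is ordinary Python rather than an ootypesystem type, and the names are invented for the example::

    class Pair(object):
        # carries the already-boxed state plus a shared behaviour object;
        # conceptually its identity is the identity of the first member
        def __init__(self, value, behaviour):
            self.value = value            # e.g. a boxed System.Int32
            self.behaviour = behaviour    # stateless "class part"

        def send(self, methodname, *args):
            # method calls go through the behaviour, passing the pair explicitly
            return getattr(self.behaviour, methodname)(self, *args)

    class W_IntObject_bhvr(object):
        def foo(self, pair, x):
            return pair.value + x

    bhvr = W_IntObject_bhvr()
    x = Pair(41, bhvr)
    assert x.send('foo', 1) == 42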
- -Step 1: RPython source code ---------------------------- - - :: - - class W_IntObject: - def __init__(self, intval): - self.intval = intval - - def foo(self, x): - return self.intval + x - - def bar(): - x = W_IntObject(41) - return x.foo(1) - - -Step 2: RTyping ---------------- - -Sometimes the following examples are not 100% accurate for the sake of -simplicity (e.g: we directly list the type of methods instead of the -ootype._meth instances that contains it). - -Low level types - - :: - - W_IntObject = Instance( - "W_IntObject", # name - ootype.OBJECT, # base class - {"intval": (Signed, 0)}, # attributes - {"foo": Meth([Signed], Signed)} # methods - ) - - -Prebuilt constants (referred by name in the flowgraphs) - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject) - 2. oosetfield(x, "meta", W_IntObject_meta_pbc) - 3. direct_call(W_IntObject.__init__, x, 41) - 4. result = oosend("foo", x, 1) - 5. return result - } - - W_IntObject.__init__(W_IntObject self, Signed intval) { - 1. oosetfield(self, "intval", intval) - } - - W_IntObject.foo(W_IntObject self, Signed x) { - 1. value = oogetfield(self, "value") - 2. result = int_add(value, x) - 3. return result - } - -Step 3: Transformation ----------------------- - -This step is done before the backend plays any role, but it's still -driven by its need, because at this time we want a mapping that tell -us what classes to split and how (i.e., which boxed value we want to -use). - -Let's suppose we want to map W_IntObject.intvalue to the .NET boxed -``System.Int32``. This is possible just because W_IntObject contains -only one field. Note that the "meta" field inherited from -ootype.OBJECT is special-cased because we know that it will never -change, so we can store it in the behaviour. - - -Low level types - - :: - - W_IntObject_bhvr = Instance( - "W_IntObject_bhvr", - ootype.OBJECT, - {}, # no more fields! - {"foo": Meth([W_IntObject_pair, Signed], Signed)} # the Pair is also explicitly passed - ) - - W_IntObject_pair = Pair( - ("value", (System.Int32, 0)), # (name, (TYPE, default)) - ("behaviour", (W_IntObject_bhvr, W_IntObject_bhvr_pbc)) - ) - - -Prebuilt constants - - :: - - W_IntObject_meta_pbc = (...) - W_IntObject.__init__ = (static method pbc - see below for the graph) - W_IntObject_bhvr_pbc = new(W_IntObject_bhvr); W_IntObject_bhvr_pbc.meta = W_IntObject_meta_pbc - W_IntObject_value_default = new System.Int32(0) - - -Flowgraphs - - :: - - bar() { - 1. x = new(W_IntObject_pair) # the behaviour has been already set because - # it's the default value of the field - - 2. # skipped (meta is already set in the W_IntObject_bhvr_pbc) - - 3. direct_call(W_IntObject.__init__, x, 41) - - 4. bhvr = oogetfield(x, "behaviour") - result = oosend("foo", bhvr, x, 1) # note that "x" is explicitly passed to foo - - 5. return result - } - - W_IntObject.__init__(W_IntObjectPair self, Signed value) { - 1. boxed = clibox(value) # boxed is of type System.Int32 - oosetfield(self, "value", boxed) - } - - W_IntObject.foo(W_IntObject_bhvr bhvr, W_IntObject_pair self, Signed x) { - 1. boxed = oogetfield(self, "value") - value = unbox(boxed, Signed) - - 2. result = int_add(value, x) - - 3. return result - } - - -Inheritance ------------ - -Apply the transformation to a whole class (sub)hierarchy is a bit more -complex. Basically we want to mimic the same hierarchy also on the -``Pair``\s, but we have to fight the VM limitations. 
In .NET for -example, we can't have "covariant fields":: - - class Base { - public Base field; - } - - class Derived: Base { - public Derived field; - } - -A solution is to use only kind of ``Pair``, whose ``value`` and -``behaviour`` type are of the most precise type that can hold all the -values needed by the subclasses:: - - class W_Object: pass - class W_IntObject(W_Object): ... - class W_StringObject(W_Object): ... - - ... - - W_Object_pair = Pair(System.Object, W_Object_bhvr) - -Where ``System.Object`` is of course the most precise type that can -hold both ``System.Int32`` and ``System.String``. - -This means that the low level type of all the ``W_Object`` subclasses -will be ``W_Object_pair``, but it also means that we will need to -insert the appropriate downcasts every time we want to access its -fields. I'm not sure how much this can impact performances. - - diff --git a/pypy/doc/discussion/outline-external-ootype.rst b/pypy/doc/discussion/outline-external-ootype.rst deleted file mode 100644 --- a/pypy/doc/discussion/outline-external-ootype.rst +++ /dev/null @@ -1,187 +0,0 @@ -Some discussion about external objects in ootype -================================================ - -Current approach: - -* SomeCliXxx for .NET backend - -SomeCliXxx ----------- - -* Supports method overloading - -* Supports inheritance in a better way - -* Supports static methods - -Would be extremely cool to generalize the approach to be useful also for the -JVM backend. Here are some notes: - -* There should be one mechanism, factored out nicely out of any backend, - to support any possible backend (cli, jvm for now). - -* This approach might be eventually extended by a backend itself, but - as much as possible code should be factored out. - -* Backend should take care itself about creating such classes, either - manually or automatically. - -* Should support superset of needs of all backends (ie callbacks, - method overloading, etc.) - - -Proposal of alternative approach -================================ - -The goal of the task is to let RPython program access "external -entities" which are available in the target platform; these include: - - - external classes (e.g. for .NET: System.Collections.ArrayList) - - - external prebuilt instances (e.g. for .NET: typeof(System.Console)) - -External entities should behave as much as possible as "internal -entities". - -Moreover, we want to preserve the possibility of *testing* RPython -programs on top of CPython if possible. For example, it should be -possible to RPython programs using .NET external objects using -PythonNet; for JVM, there are JPype_ and JTool_, to be investigated: - -.. _JPype: http://jpype.sourceforge.net/ -.. _JTool: http://wiki.europython.eu/Talks/Jtool%20Java%20In%20The%20Python%20Vm - -How to represent types ----------------------- - -First, some definitions: - - - high-level types are the types used by the annotator - (SomeInteger() & co.) - - - low-level types are the types used by the rtyper (Signed & co.) - - - platform-level types are the types used by the backends (e.g. int32 for - .NET) - -Usually, RPython types are described "top-down": we start from the -annotation, then the rtyper transforms the high-level types into -low-level types, then the backend transforms low-level types into -platform-level types. E.g. for .NET, SomeInteger() -> Signed -> int32. - -External objects are different: we *already* know the platform-level -types of our objects and we can't modify them. 
What we need to do is -to specify an annotation that after the high-level -> low-level -> -platform-level transformation will give us the correct types. - -For primitive types it is usually easy to find the correct annotation; -if we have an int32, we know that it's ootype is Signed and the -corresponding annotation is SomeInteger(). - -For non-primitive types such as classes, we must use a "bottom-up" -approach: first, we need a description of platform-level interface of -the class; then we construct the corresponding low-level type and -teach the backends how to treat such "external types". Finally, we -wrap the low-level types into special "external annotation". - -For example, consider a simple existing .NET class:: - - class Foo { - public float bar(int x, int y) { ... } - } - -The corresponding low-level type could be something like this:: - - Foo = ootype.ExternalInstance({'bar': ([Signed, Signed], Float)}) - -Then, the annotation for Foo's instances is SomeExternalInstance(Foo). -This way, the transformation from high-level types to platform-level -types is straightforward and correct. - -Finally, we need support for static methods: similarly for classes, we -can define an ExternalStaticMeth low-level type and a -SomeExternalStaticMeth annotation. - - -How to describe types ---------------------- - -To handle external objects we must specify their signatures. For CLI -and JVM the job can be easily automatized, since the objects have got -precise signatures. - - -RPython interface ------------------ - -External objects are exposed as special Python objects that gets -annotated as SomeExternalXXX. Each backend can choose its own way to -provide these objects to the RPython programmer. - -External classes will be annotated as SomeExternalClass; two -operations are allowed: - - - call: used to instantiate the class, return an object which will - be annotated as SomeExternalInstance. - - - access to static methods: return an object which will be annotated - as SomeExternalStaticMeth. - -Instances are annotated as SomeExternalInstance. Prebuilt external objects are -annotated as SomeExternalInstance(const=...). - -Open issues ------------ - -Exceptions -~~~~~~~~~~ - -.NET and JVM users want to catch external exceptions in a natural way; -e.g.:: - - try: - ... - except System.OverflowException: - ... - -This is not straightforward because to make the flow objspace happy the -object which represent System.OverflowException must be a real Python -class that inherits from Exception. - -This means that the Python objects which represent external classes -must be Python classes itself, and that classes representing -exceptions must be special cased and made subclasses of Exception. - - -Inheritance -~~~~~~~~~~~ - -It would be nice to allow programmers to inherit from an external -class. Not sure about the implications, though. - -Special methods/properties -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In .NET there are special methods that can be accessed using a special -syntax, for example indexer or properties. It would be nice to have in -RPython the same syntax as C#, although we can live without that. - - -Implementation details ----------------------- - -The CLI backend use a similar approach right now, but it could be -necessary to rewrite a part of it. - -To represent low-level types, it uses NativeInstance, a subclass of -ootype.Instance that contains all the information needed by the -backend to reference the class (e.g., the namespace). It also supports -overloading. 
- -For annotations, it reuses SomeOOInstance, which is also a wrapper -around a low-level type but it has been designed for low-level -helpers. It might be saner to use another annotation not to mix apples -and oranges, maybe factoring out common code. - -I don't know whether and how much code can be reused from the existing -bltregistry. diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst --- a/pypy/doc/discussions.rst +++ b/pypy/doc/discussions.rst @@ -7,7 +7,7 @@ .. toctree:: - + discussion/finalizer-order.rst discussion/howtoimplementpickling.rst discussion/improve-rpython.rst diff --git a/pypy/doc/dot-net.rst b/pypy/doc/dot-net.rst deleted file mode 100644 --- a/pypy/doc/dot-net.rst +++ /dev/null @@ -1,11 +0,0 @@ -.NET support -============ - - .. warning:: - - The .NET backend within PyPy is unmaintained. This documentation may be out-of-date. We welcome contributors who are interested in doing the work to get this into shape. - -.. toctree:: - - cli-backend.rst - clr-module.rst diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -369,13 +369,11 @@ Which backends are there for the RPython toolchain? --------------------------------------------------- -Currently, there are backends for C_, the CLI_, and the JVM_. -All of these can translate the entire PyPy interpreter. +Currently, there only backends is C_. +It can translate the entire PyPy interpreter. To learn more about backends take a look at the `translation document`_. .. _C: translation.html#the-c-back-end -.. _CLI: cli-backend.html -.. _JVM: translation.html#genjvm .. _`translation document`: translation.html ------------------ diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -115,45 +115,6 @@ >>> f(6) 1 -Translating the flow graph to CLI or JVM code -+++++++++++++++++++++++++++++++++++++++++++++ - -PyPy also contains a `CLI backend`_ and JVM backend which -can translate flow graphs into .NET executables or a JVM jar -file respectively. Both are able to translate the entire -interpreter. You can try out the CLI and JVM backends -from the interactive translator shells as follows:: - - >>> def myfunc(a, b): return a+b - ... - >>> t = Translation(myfunc, [int, int]) - >>> t.annotate() - >>> f = t.compile_cli() # or compile_jvm() - >>> f(4, 5) - 9 - -The object returned by ``compile_cli`` or ``compile_jvm`` -is a wrapper around the real -executable: the parameters are passed as command line arguments, and -the returned value is read from the standard output. - -Once you have compiled the snippet, you can also try to launch the -executable directly from the shell. You will find the -executable in one of the ``/tmp/usession-*`` directories:: - - # For CLI: - $ mono /tmp/usession-trunk-/main.exe 4 5 - 9 - - # For JVM: - $ java -cp /tmp/usession-trunk-/pypy pypy.Main 4 5 - 9 - -To translate and run for the CLI you must have the SDK installed: Windows -users need the `.NET Framework SDK`_, while Linux and Mac users -can use Mono_. To translate and run for the JVM you must have a JDK -installed (at least version 6) and ``java``/``javac`` on your path. 
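The wrapper returned by ``compile_cli`` or ``compile_jvm`` is nothing magical; a minimal sketch of the idea, assuming a subprocess-based implementation (the real helper in the test framework is more involved and converts the output back into Python objects), looks like this::

    import subprocess

    def make_wrapper(cmdline):        # e.g. ['mono', '/tmp/usession-.../main.exe']
        def wrapper(*args):
            # parameters become command line arguments, the result is read
            # back from the standard output of the produced executable
            out = subprocess.check_output(cmdline + [str(a) for a in args])
            return out.strip()
        return wrapper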
- A slightly larger example +++++++++++++++++++++++++ @@ -191,31 +152,31 @@ There are several environment variables you can find useful while playing with the RPython: ``PYPY_USESSION_DIR`` - RPython uses temporary session directories to store files that are generated during the + RPython uses temporary session directories to store files that are generated during the translation process(e.g., translated C files). ``PYPY_USESSION_DIR`` serves as a base directory for these session dirs. The default value for this variable is the system's temporary dir. ``PYPY_USESSION_KEEP`` - By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. + By default RPython keeps only the last ``PYPY_USESSION_KEEP`` (defaults to 3) session dirs inside ``PYPY_USESSION_DIR``. Increase this value if you want to preserve C files longer (useful when producing lots of lldebug builds). .. _`your own interpreters`: faq.html#how-do-i-compile-my-own-interpreters -.. _`start reading sources`: +.. _`start reading sources`: Where to start reading the sources ----------------------------------- +---------------------------------- PyPy is made from parts that are relatively independent of each other. You should start looking at the part that attracts you most (all paths are -relative to the PyPy top level directory). You may look at our `directory reference`_ +relative to the PyPy top level directory). You may look at our `directory reference`_ or start off at one of the following points: * `pypy/interpreter`_ contains the bytecode interpreter: bytecode dispatcher in `pypy/interpreter/pyopcode.py`_, frame and code objects in `pypy/interpreter/eval.py`_ and `pypy/interpreter/pyframe.py`_, function objects and argument passing in `pypy/interpreter/function.py`_ and `pypy/interpreter/argument.py`_, the object space interface definition in `pypy/interpreter/baseobjspace.py`_, modules in - `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode + `pypy/interpreter/module.py`_ and `pypy/interpreter/mixedmodule.py`_. Core types supporting the bytecode interpreter are defined in `pypy/interpreter/typedef.py`_. * `pypy/interpreter/pyparser`_ contains a recursive descent parser, @@ -253,7 +214,7 @@ .. _`RPython standard library`: rlib.html -.. _optionaltool: +.. _optionaltool: Running PyPy's unit tests @@ -284,7 +245,7 @@ # or for running tests of a whole subdirectory py.test pypy/interpreter/ -See `py.test usage and invocations`_ for some more generic info +See `py.test usage and invocations`_ for some more generic info on how you can run tests. Beware trying to run "all" pypy tests by pointing to the root @@ -352,14 +313,14 @@ .. _`interpreter-level and app-level`: coding-guide.html#interpreter-level -.. _`trace example`: +.. _`trace example`: Tracing bytecode and operations on objects -++++++++++++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++++++++++ You can use the trace object space to monitor the interpretation -of bytecodes in connection with object space operations. To enable -it, set ``__pytrace__=1`` on the interactive PyPy console:: +of bytecodes in connection with object space operations. To enable +it, set ``__pytrace__=1`` on the interactive PyPy console:: >>>> __pytrace__ = 1 Tracing enabled @@ -384,24 +345,24 @@ .. 
_`example-interpreter`: https://bitbucket.org/pypy/example-interpreter -Additional Tools for running (and hacking) PyPy +Additional Tools for running (and hacking) PyPy ----------------------------------------------- -We use some optional tools for developing PyPy. They are not required to run +We use some optional tools for developing PyPy. They are not required to run the basic tests or to get an interactive PyPy prompt but they help to -understand and debug PyPy especially for the translation process. +understand and debug PyPy especially for the translation process. graphviz & pygame for flow graph viewing (highly recommended) +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ graphviz and pygame are both necessary if you -want to look at generated flow graphs: +want to look at generated flow graphs: - graphviz: http://www.graphviz.org/Download.php + graphviz: http://www.graphviz.org/Download.php pygame: http://www.pygame.org/download.shtml -py.test and the py lib +py.test and the py lib +++++++++++++++++++++++ The `py.test testing tool`_ drives all our testing needs. @@ -412,7 +373,7 @@ You don't necessarily need to install these two libraries because we also ship them inlined in the PyPy source tree. -Getting involved +Getting involved ----------------- PyPy employs an open development process. You are invited to join our diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -6,7 +6,7 @@ PyPy's Python interpreter is a very compliant Python -interpreter implemented in RPython. When compiled, it passes most of +interpreter implemented in RPython. When compiled, it passes most of `CPythons core language regression tests`_ and comes with many of the extension modules included in the standard library including ``ctypes``. It can run large libraries such as Django_ and Twisted_. There are some small behavioral @@ -18,8 +18,8 @@ .. _`CPython differences`: cpython_differences.html -To actually use PyPy's Python interpreter, the first thing to do is to -`download a pre-built PyPy`_ for your architecture. +To actually use PyPy's Python interpreter, the first thing to do is to +`download a pre-built PyPy`_ for your architecture. .. _`download a pre-built PyPy`: http://pypy.org/download.html @@ -31,8 +31,8 @@ .. _`windows document`: windows.html -You can translate the whole of PyPy's Python interpreter to low level C code, -or `CLI code`_. If you intend to build using gcc, check to make sure that +You can translate the whole of PyPy's Python interpreter to low level C code. +If you intend to build using gcc, check to make sure that the version you have is not 4.2 or you will run into `this bug`_. .. _`this bug`: https://bugs.launchpad.net/ubuntu/+source/gcc-4.2/+bug/187391 @@ -71,9 +71,9 @@ 3. Translation is time-consuming -- 45 minutes on a very fast machine -- - and RAM-hungry. As of March 2011, you will need **at least** 2 GB of - memory on a - 32-bit machine and 4GB on a 64-bit machine. If your memory resources + and RAM-hungry. As of March 2011, you will need **at least** 2 GB of + memory on a + 32-bit machine and 4GB on a 64-bit machine. If your memory resources are constrained, or your machine is slow you might want to pick the `optimization level`_ `1` in the next step. A level of `2` or `3` or `jit` gives much better results, though. 
But if all @@ -82,7 +82,7 @@ Let me stress this again: at ``--opt=1`` you get the Boehm GC, which is here mostly for historical and for testing reasons. - You really do not want to pick it for a program you intend to use. + You really do not want to pick it for a program you intend to use. The resulting ``pypy-c`` is slow. 4. Run:: @@ -91,8 +91,8 @@ python ../../rpython/bin/rpython --opt=jit targetpypystandalone.py possibly replacing ``--opt=jit`` with another `optimization level`_ - of your choice like ``--opt=2`` if you do not want to include the JIT - compiler, which makes the Python interpreter much slower. + of your choice. Typical example: ``--opt=2`` gives a good (but of + course slower) Python interpreter without the JIT. .. _`optimization level`: config/opt.html @@ -119,7 +119,7 @@ >>>> pystone.main() Pystone(1.1) time for 50000 passes = 0.060004 This machine benchmarks at 833278 pystones/second - >>>> + >>>> Note that pystone gets faster as the JIT kicks in. This executable can be moved around or copied on other machines; see @@ -142,70 +142,6 @@ .. _`objspace proxies`: objspace-proxies.html -.. _`CLI code`: - -Translating using the CLI backend -+++++++++++++++++++++++++++++++++ - -**Note: the CLI backend is no longer maintained** - -To create a standalone .NET executable using the `CLI backend`_:: - - ./translate.py --backend=cli targetpypystandalone.py - -The executable and all its dependencies will be stored in the -./pypy-cli-data directory. To run pypy.NET, you can run -./pypy-cli-data/main.exe. If you are using Linux or Mac, you can use -the convenience ./pypy-cli script:: - - $ ./pypy-cli - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``distopian and utopian chairs'' - >>>> - -Moreover, at the moment it's not possible to do the full translation -using only the tools provided by the Microsoft .NET SDK, since -``ilasm`` crashes when trying to assemble the pypy-cli code due to its -size. Microsoft .NET SDK 2.0.50727.42 is affected by this bug; other -versions could be affected as well: if you find a version of the SDK -that works, please tell us. - -Windows users that want to compile their own pypy-cli can install -Mono_: if a Mono installation is detected the translation toolchain -will automatically use its ``ilasm2`` tool to assemble the -executables. - -To try out the experimental .NET integration, check the documentation of the -clr_ module. - -.. not working now: - - .. _`JVM code`: - - Translating using the JVM backend - +++++++++++++++++++++++++++++++++ - - To create a standalone JVM executable:: - - ./translate.py --backend=jvm targetpypystandalone.py - - This will create a jar file ``pypy-jvm.jar`` as well as a convenience - script ``pypy-jvm`` for executing it. To try it out, simply run - ``./pypy-jvm``:: - - $ ./pypy-jvm - Python 2.7.0 (61ef2a11b56a, Mar 02 2011, 03:00:11) - [PyPy 1.6.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - And now for something completely different: ``# assert did not crash'' - >>>> - - Alternatively, you can run it using ``java -jar pypy-jvm.jar``. At the moment - the executable does not provide any interesting features, like integration with - Java. 
- Installation ++++++++++++ @@ -258,22 +194,22 @@ cd pypy python bin/pyinteractive.py -After a few seconds (remember: this is running on top of CPython), -you should be at the PyPy prompt, which is the same as the Python +After a few seconds (remember: this is running on top of CPython), +you should be at the PyPy prompt, which is the same as the Python prompt, but with an extra ">". Now you are ready to start running Python code. Most Python -modules should work if they don't involve CPython extension +modules should work if they don't involve CPython extension modules. **This is slow, and most C modules are not present by default even if they are standard!** Here is an example of -determining PyPy's performance in pystones:: +determining PyPy's performance in pystones:: - >>>> from test import pystone + >>>> from test import pystone >>>> pystone.main(10) The parameter is the number of loops to run through the test. The default is 50000, which is far too many to run in a non-translated -PyPy version (i.e. when PyPy's interpreter itself is being interpreted +PyPy version (i.e. when PyPy's interpreter itself is being interpreted by CPython). pyinteractive.py options @@ -300,9 +236,7 @@ .. _Mono: http://www.mono-project.com/Main_Page -.. _`CLI backend`: cli-backend.html .. _`Boehm-Demers-Weiser garbage collector`: http://www.hpl.hp.com/personal/Hans_Boehm/gc/ -.. _clr: clr-module.html .. _`CPythons core language regression tests`: http://buildbot.pypy.org/summary?category=applevel&branch=%3Ctrunk%3E .. include:: _ref.txt diff --git a/pypy/doc/glossary.rst b/pypy/doc/glossary.rst --- a/pypy/doc/glossary.rst +++ b/pypy/doc/glossary.rst @@ -26,8 +26,7 @@ backend Code generator that converts an `RPython `__ program to a `target - language`_ using the :term:`RPython toolchain`. A backend uses either the - :term:`lltypesystem` or the :term:`ootypesystem`. + language`_ using the :term:`RPython toolchain`. compile-time In the context of the :term:`JIT`, compile time is when the JIT is @@ -84,12 +83,6 @@ extend or twist these semantics, or c) serve whole-program analysis purposes. - ootypesystem - An `object oriented type model `__ - containing classes and instances. A :term:`backend` that uses this type system - is also called a high-level backend. The JVM and CLI backends - all use this typesystem. - prebuilt constant In :term:`RPython` module globals are considered constants. Moreover, global (i.e. prebuilt) lists and dictionaries are supposed to be diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -30,7 +30,7 @@ * update README * change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: - force-builds.py /release/ + force-builds.py * wait for builds to complete, make sure there are no failures * upload binaries to https://bitbucket.org/pypy/pypy/downloads diff --git a/pypy/doc/project-documentation.rst b/pypy/doc/project-documentation.rst --- a/pypy/doc/project-documentation.rst +++ b/pypy/doc/project-documentation.rst @@ -72,8 +72,6 @@ `command line reference`_ -`CLI backend`_ describes the details of the .NET backend. - `JIT Generation in PyPy`_ describes how we produce the Python Just-in-time Compiler from our Python interpreter. 
diff --git a/pypy/doc/release-2.1.0-beta2.rst b/pypy/doc/release-2.1.0-beta2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0-beta2.rst @@ -0,0 +1,66 @@ +=============== +PyPy 2.1 beta 2 +=============== + +We're pleased to announce the second beta of the upcoming 2.1 release of PyPy. +This beta adds one new feature to the 2.1 release and contains several bugfixes listed below. + +You can download the PyPy 2.1 beta 1 release here: + + http://pypy.org/download.html + +Highlights +========== + +* Support for os.statvfs and os.fstatvfs on unix systems. + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in pypy sometimes failed with a "bad write retry" message. + From noreply at buildbot.pypy.org Wed Jul 31 11:24:04 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 31 Jul 2013 11:24:04 +0200 (CEST) Subject: [pypy-commit] pypy default: fix test, related to register allocation changes Message-ID: <20130731092404.6EA8B1C054B@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r65851:7b4d87a9afe9 Date: 2013-07-31 04:09 -0500 http://bitbucket.org/pypy/pypy/changeset/7b4d87a9afe9/ Log: fix test, related to register allocation changes diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -88,7 +88,7 @@ else: assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): - assert nos == [9, 10, 47] + assert nos == [0, 1, 47] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] From noreply at buildbot.pypy.org Wed Jul 31 11:34:22 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 11:34:22 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: another failing test for stmrewrite Message-ID: <20130731093422.B9C271C054B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65852:e3b44b46c55c Date: 2013-07-31 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e3b44b46c55c/ Log: another failing test for stmrewrite diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -64,6 +64,30 @@ jump() """, t=NULL) + def test_invalidate_read_status_after_write_to_constptr(self): + TP = lltype.GcArray(lltype.Signed) + NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) + self.check_rewrite(""" + [p0] + p1 = same_as(ConstPtr(t)) + p2 = same_as(ConstPtr(t)) + p3 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p2, p0, descr=tzdescr) + p4 = getfield_gc(p1, descr=tzdescr) + jump() + """, """ + [p0] + p1 = same_as(ConstPtr(t)) + p2 = same_as(ConstPtr(t)) + cond_call_stm_b(p1, descr=P2Rdescr) + p3 = getfield_gc(p1, descr=tzdescr) + cond_call_stm_b(p2, descr=P2Wdescr) + setfield_gc(p2, p0, descr=tzdescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p4 = getfield_gc(p1, descr=tzdescr) + jump() + """, t=NULL) + def test_invalidate_read_status_after_write(self): self.check_rewrite(""" [p0] From noreply at buildbot.pypy.org Wed Jul 31 11:34:24 2013 From: noreply at buildbot.pypy.org (Raemi) 
Date: Wed, 31 Jul 2013 11:34:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: needed for calling clear_exception_data_on_abort, I guess.. Message-ID: <20130731093424.A20BD1C054B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65853:f5e47c3c6884 Date: 2013-07-31 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/f5e47c3c6884/ Log: needed for calling clear_exception_data_on_abort, I guess.. diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -18,7 +18,20 @@ def clear_exception_data_on_abort(): # XXX: provisional API just to be safe # called by pypy/module/thread/stm:initialize_execution_context - llop.stm_clear_exception_data_on_abort(lltype.Void) + pass + +class ClearExceptionDataOnAbort(ExtRegistryEntry): + _about_ = clear_exception_data_on_abort + + def compute_result_annotation(self): + from rpython.annotator import model as annmodel + return annmodel.s_None + + def specialize_call(self, hop): + hop.exception_cannot_occur() + return hop.genop('stm_clear_exception_data_on_abort', [], + resulttype=lltype.Void) + @dont_look_inside def become_inevitable(): From noreply at buildbot.pypy.org Wed Jul 31 11:34:26 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 11:34:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: forgot the case where cmp(immed, immed) in ptr_eq Message-ID: <20130731093426.62FE41C054B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65854:94a435e2cdb5 Date: 2013-07-31 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/94a435e2cdb5/ Log: forgot the case where cmp(immed,immed) in ptr_eq diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2174,25 +2174,51 @@ # FASTPATH # # a == b -> SET NZ - sl = X86_64_SCRATCH_REG.lowest8bits() - mc.MOV(X86_64_SCRATCH_REG, a_base) - mc.CMP(X86_64_SCRATCH_REG, b_base) - mc.SET_ir(rx86.Conditions['Z'], sl.value) - mc.MOVZX8_rr(X86_64_SCRATCH_REG.value, sl.value) - # mc.TEST8_rr() without movzx8 - mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) - mc.J_il8(rx86.Conditions['NZ'], 0) - j_ok1 = mc.get_relative_pos() + if isinstance(a_base, ImmedLoc) and isinstance(b_base, ImmedLoc): + if a_base.getint() == b_base.getint(): + mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + mc.INC(X86_64_SCRATCH_REG) # NZ flag + mc.JMP_l8(0) + j_ok1 = mc.get_relative_pos() + else: + # do the dance, even if a or b is an Immed + # XXX: figure out if CMP() is able to handle it without + # the explicit MOV before it (CMP(a_base, b_base)) + sl = X86_64_SCRATCH_REG.lowest8bits() + mc.MOV(X86_64_SCRATCH_REG, a_base) + mc.CMP(X86_64_SCRATCH_REG, b_base) + mc.SET_ir(rx86.Conditions['Z'], sl.value) + mc.MOVZX8_rr(X86_64_SCRATCH_REG.value, sl.value) + # mc.TEST8_rr() without movzx8 + mc.TEST_rr(X86_64_SCRATCH_REG.value, X86_64_SCRATCH_REG.value) + mc.J_il8(rx86.Conditions['NZ'], 0) + j_ok1 = mc.get_relative_pos() # a == 0 || b == 0 -> SET Z - mc.CMP(a_base, imm(0)) - mc.J_il8(rx86.Conditions['Z'], 0) - j_ok2 = mc.get_relative_pos() + if isinstance(a_base, ImmedLoc): + if a_base.getint() == 0: + # Z flag still set from above + mc.JMP_l8(0) + j_ok2 = mc.get_relative_pos() + else: + j_ok2 = 0 + else: + mc.CMP(a_base, imm(0)) + mc.J_il8(rx86.Conditions['Z'], 0) + j_ok2 = mc.get_relative_pos() # - mc.CMP(b_base, imm(0)) - mc.J_il8(rx86.Conditions['Z'], 0) - j_ok3 = mc.get_relative_pos() - + if 
isinstance(b_base, ImmedLoc): + if b_base.getint() == 0: + # set Z flag: + mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) + mc.JMP_l8(0) + j_ok3 = mc.get_relative_pos() + else: + j_ok3 = 0 + else: + mc.CMP(b_base, imm(0)) + mc.J_il8(rx86.Conditions['Z'], 0) + j_ok3 = mc.get_relative_pos() # a.type != b.type # XXX: todo, if it ever happens.. @@ -2214,10 +2240,12 @@ # OK: flags already set offset = mc.get_relative_pos() - j_ok1 mc.overwrite(j_ok1 - 1, chr(offset)) - offset = mc.get_relative_pos() - j_ok2 - mc.overwrite(j_ok2 - 1, chr(offset)) - offset = mc.get_relative_pos() - j_ok3 - mc.overwrite(j_ok3 - 1, chr(offset)) + if j_ok2: + offset = mc.get_relative_pos() - j_ok2 + mc.overwrite(j_ok2 - 1, chr(offset)) + if j_ok3: + offset = mc.get_relative_pos() - j_ok3 + mc.overwrite(j_ok3 - 1, chr(offset)) From noreply at buildbot.pypy.org Wed Jul 31 12:10:51 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 12:10:51 +0200 (CEST) Subject: [pypy-commit] pypy foldable-getarrayitem-indexerror: a pypy-c test Message-ID: <20130731101051.34CE11C02E4@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: foldable-getarrayitem-indexerror Changeset: r65855:1a85c6a3e2fb Date: 2013-07-31 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/1a85c6a3e2fb/ Log: a pypy-c test diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit From noreply at buildbot.pypy.org Wed Jul 31 12:18:49 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 12:18:49 +0200 (CEST) Subject: [pypy-commit] pypy foldable-getarrayitem-indexerror: a what's new entry Message-ID: <20130731101849.D93861C02E4@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: foldable-getarrayitem-indexerror Changeset: r65856:b5c526aa2b94 Date: 2013-07-31 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b5c526aa2b94/ Log: a what's new entry diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,3 +31,6 @@ more precise information about which functions can be called. Needed for Topaz. .. branch: ssl_moving_write_buffer + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. 
From noreply at buildbot.pypy.org Wed Jul 31 12:18:51 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 12:18:51 +0200 (CEST) Subject: [pypy-commit] pypy foldable-getarrayitem-indexerror: close to-be-merged branch Message-ID: <20130731101851.6B6B61C02E4@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: foldable-getarrayitem-indexerror Changeset: r65857:6448f2f5ce8c Date: 2013-07-31 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6448f2f5ce8c/ Log: close to-be-merged branch From noreply at buildbot.pypy.org Wed Jul 31 12:18:52 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 12:18:52 +0200 (CEST) Subject: [pypy-commit] pypy default: merge foldable-getarrayitem-indexerror: constant-fold reads out of constant Message-ID: <20130731101852.9A4F31C02E4@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65858:27316dbf2693 Date: 2013-07-31 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/27316dbf2693/ Log: merge foldable-getarrayitem-indexerror: constant-fold reads out of constant applevel tuples diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,7 @@ speeds up list.append() and list.pop(). .. branch: curses_fixes + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -69,6 +69,28 @@ self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, getarrayitem_gc=0, getarrayitem_gc_pure=1) + def test_array_index_error(self): + class X(object): + _immutable_fields_ = ["y[*]"] + + def __init__(self, x): + self.y = x + + def get(self, index): + try: + return self.y[index] + except IndexError: + return -41 + + def f(index): + l = [1, 2, 3, 4] + l[2] = 30 + a = escape(X(l)) + return a.get(index) + res = self.interp_operations(f, [2], listops=True) + assert res == 30 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, + getarrayitem_gc=0, getarrayitem_gc_pure=1) def test_array_in_immutable(self): class X(object): diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -247,27 +247,22 @@ v_lst, v_index = hop.inputargs(r_lst, Signed) if checkidx: hop.exception_is_here() + spec = dum_checkidx else: + spec = dum_nocheck hop.exception_cannot_occur() - if hop.args_s[0].listdef.listitem.mutated or checkidx: - if hop.args_s[1].nonneg: - llfn = ll_getitem_nonneg - else: - llfn = ll_getitem - if 
checkidx: - spec = dum_checkidx - else: - spec = dum_nocheck - c_func_marker = hop.inputconst(Void, spec) - v_res = hop.gendirectcall(llfn, c_func_marker, v_lst, v_index) + if hop.args_s[0].listdef.listitem.mutated: + basegetitem = ll_getitem_fast else: - # this is the 'foldable' version, which is not used when - # we check for IndexError - if hop.args_s[1].nonneg: - llfn = ll_getitem_foldable_nonneg - else: - llfn = ll_getitem_foldable - v_res = hop.gendirectcall(llfn, v_lst, v_index) + basegetitem = ll_getitem_foldable_nonneg + + if hop.args_s[1].nonneg: + llfn = ll_getitem_nonneg + else: + llfn = ll_getitem + c_func_marker = hop.inputconst(Void, spec) + c_basegetitem = hop.inputconst(Void, basegetitem) + v_res = hop.gendirectcall(llfn, c_func_marker, c_basegetitem, v_lst, v_index) return r_lst.recast(hop.llops, v_res) rtype_getitem_key = rtype_getitem @@ -654,16 +649,16 @@ i += 1 length_1_i -= 1 -def ll_getitem_nonneg(func, l, index): +def ll_getitem_nonneg(func, basegetitem, l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") if func is dum_checkidx: if index >= l.ll_length(): raise IndexError - return l.ll_getitem_fast(index) + return basegetitem(l, index) ll_getitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_getitem(func, l, index): +def ll_getitem(func, basegetitem, l, index): if func is dum_checkidx: length = l.ll_length() # common case: 0 <= index < length if r_uint(index) >= r_uint(length): @@ -680,21 +675,18 @@ if index < 0: index += l.ll_length() ll_assert(index >= 0, "negative list getitem index out of bound") + return basegetitem(l, index) +# no oopspec -- the function is inlined by the JIT + +def ll_getitem_fast(l, index): return l.ll_getitem_fast(index) -# no oopspec -- the function is inlined by the JIT +ll_getitem_fast._always_inline_ = True def ll_getitem_foldable_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") return l.ll_getitem_fast(index) ll_getitem_foldable_nonneg.oopspec = 'list.getitem_foldable(l, index)' -def ll_getitem_foldable(l, index): - if index < 0: - index += l.ll_length() - return ll_getitem_foldable_nonneg(l, index) -ll_getitem_foldable._always_inline_ = True -# no oopspec -- the function is inlined by the JIT - def ll_setitem_nonneg(func, l, index, newitem): ll_assert(index >= 0, "unexpectedly negative list setitem index") if func is dum_checkidx: diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -14,15 +14,19 @@ from rpython.translator.translator import TranslationContext -# undo the specialization parameter +# undo the specialization parameters for n1 in 'get set del'.split(): + if n1 == "get": + extraarg = "ll_getitem_fast, " + else: + extraarg = "" for n2 in '', '_nonneg': name = 'll_%sitem%s' % (n1, n2) globals()['_' + name] = globals()[name] exec """if 1: def %s(*args): - return _%s(dum_checkidx, *args) -""" % (name, name) + return _%s(dum_checkidx, %s*args) +""" % (name, name, extraarg) del n1, n2, name @@ -1400,7 +1404,7 @@ block = graph.startblock op = block.operations[-1] assert op.opname == 'direct_call' - func = op.args[0].value._obj._callable + func = op.args[2].value assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) @@ -1511,8 +1515,8 @@ block = graph.startblock lst1_getitem_op = block.operations[-3] # XXX graph fishing lst2_getitem_op = block.operations[-2] - func1 = 
lst1_getitem_op.args[0].value._obj._callable - func2 = lst2_getitem_op.args[0].value._obj._callable + func1 = lst1_getitem_op.args[2].value + func2 = lst2_getitem_op.args[2].value assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') From noreply at buildbot.pypy.org Wed Jul 31 13:31:25 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 13:31:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: yet another failing test for stmrewrite Message-ID: <20130731113125.7046D1C02E4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65859:7134c15384b9 Date: 2013-07-31 12:39 +0200 http://bitbucket.org/pypy/pypy/changeset/7134c15384b9/ Log: yet another failing test for stmrewrite diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -108,6 +108,26 @@ p5 = getfield_gc(p1, descr=tzdescr) """) + def test_invalidate_read_status_after_write_to_field(self): + self.check_rewrite(""" + [p0] + p1 = getfield_gc(p0, descr=tzdescr) + p2 = getfield_gc(p0, descr=tzdescr) + p3 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p2, p0, descr=tzdescr) + p4 = getfield_gc(p1, descr=tzdescr) + """, """ + [p0] + p1 = getfield_gc(p0, descr=tzdescr) + p2 = getfield_gc(p0, descr=tzdescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p3 = getfield_gc(p1, descr=tzdescr) + cond_call_stm_b(p2, descr=P2Wdescr) + setfield_gc(p2, p0, descr=tzdescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p4 = getfield_gc(p1, descr=tzdescr) + """) + def test_rewrite_write_barrier_after_malloc(self): self.check_rewrite(""" [p1, p3] From noreply at buildbot.pypy.org Wed Jul 31 13:31:26 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 13:31:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: more testing, more fixes Message-ID: <20130731113126.C28031C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65860:455bc75d0676 Date: 2013-07-31 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/455bc75d0676/ Log: more testing, more fixes diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -816,6 +816,19 @@ raiseassert(x1 != ptrs[1]) raiseassert(x1 == ptrs[2]) raiseassert(x1 != ptrs[3]) + raiseassert(x2 == ptrs[0]) + raiseassert(x2 != ptrs[1]) + raiseassert(x2 != ptrs[2]) + raiseassert(x2 != ptrs[3]) + raiseassert(ptrs[0] is None) + raiseassert(ptrs[1] is not None) + raiseassert(ptrs[2] is not None) + raiseassert(ptrs[3] is not None) + raiseassert(ptrs[1] is x0) + raiseassert(ptrs[2] is x1) + raiseassert(x0 is not None) + raiseassert(x1 is not None) + raiseassert(x3 is None) # return n - 1, x, x0, x1, x2, x3, x4, x5, x6, x7, ptrs, s return before, f, None diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2176,10 +2176,12 @@ # a == b -> SET NZ if isinstance(a_base, ImmedLoc) and isinstance(b_base, ImmedLoc): if a_base.getint() == b_base.getint(): - mc.XOR(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) - mc.INC(X86_64_SCRATCH_REG) # NZ flag + mc.MOV_ri(X86_64_SCRATCH_REG.value, 1) + mc.TEST(X86_64_SCRATCH_REG, X86_64_SCRATCH_REG) # NZ flag 
mc.JMP_l8(0) j_ok1 = mc.get_relative_pos() + else: + j_ok1 = 0 else: # do the dance, even if a or b is an Immed # XXX: figure out if CMP() is able to handle it without @@ -2238,8 +2240,9 @@ # # OK: flags already set - offset = mc.get_relative_pos() - j_ok1 - mc.overwrite(j_ok1 - 1, chr(offset)) + if j_ok1: + offset = mc.get_relative_pos() - j_ok1 + mc.overwrite(j_ok1 - 1, chr(offset)) if j_ok2: offset = mc.get_relative_pos() - j_ok2 mc.overwrite(j_ok2 - 1, chr(offset)) From noreply at buildbot.pypy.org Wed Jul 31 13:40:30 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 13:40:30 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: and another test for stmrewrite.py Message-ID: <20130731114030.204BB1C029A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65861:6356290b8a58 Date: 2013-07-31 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/6356290b8a58/ Log: and another test for stmrewrite.py diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -118,6 +118,7 @@ p4 = getfield_gc(p1, descr=tzdescr) """, """ [p0] + cond_call_stm_b(p0, descr=P2Rdescr) p1 = getfield_gc(p0, descr=tzdescr) p2 = getfield_gc(p0, descr=tzdescr) cond_call_stm_b(p1, descr=P2Rdescr) @@ -128,6 +129,27 @@ p4 = getfield_gc(p1, descr=tzdescr) """) + def test_invalidate_read_status_after_write_to_array(self): + self.check_rewrite(""" + [p0, i1, i2] + p1 = getarrayitem_gc(p0, i1, descr=adescr) + p2 = getarrayitem_gc(p0, i2, descr=adescr) + p3 = getfield_gc(p1, descr=tzdescr) + setfield_gc(p2, p0, descr=tzdescr) + p4 = getfield_gc(p1, descr=tzdescr) + """, """ + [p0, i1, i2] + cond_call_stm_b(p0, descr=P2Rdescr) + p1 = getarrayitem_gc(p0, i1, descr=adescr) + p2 = getarrayitem_gc(p0, i2, descr=adescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p3 = getfield_gc(p1, descr=tzdescr) + cond_call_stm_b(p2, descr=P2Rdescr) + setfield_gc(p2, p0, descr=tzdescr) + cond_call_stm_b(p1, descr=P2Rdescr) + p4 = getfield_gc(p1, descr=tzdescr) + """) + def test_rewrite_write_barrier_after_malloc(self): self.check_rewrite(""" [p1, p3] From noreply at buildbot.pypy.org Wed Jul 31 13:42:08 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 13:42:08 +0200 (CEST) Subject: [pypy-commit] pypy default: make it possible to have the target return just the main function, which is by Message-ID: <20130731114208.4C7F81C029A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65862:cf4c83c12bb3 Date: 2013-07-31 13:41 +0200 http://bitbucket.org/pypy/pypy/changeset/cf4c83c12bb3/ Log: make it possible to have the target return just the main function, which is by far the most common case nowadays. 
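A hypothetical minimal target module under the convention introduced by this changeset, shown here before the driver.py change in the diff that follows; the module and function names are illustrative only:

    # targetminimal.py -- with this change, target() may return just the
    # entry point instead of an (entry_point, None) tuple.
    def entry_point(argv):
        # argv is the executable's argument list, as usual for a
        # standalone RPython target
        return 0

    def target(driver, args):
        return entry_point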
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -544,9 +544,14 @@ try: entry_point, inputtypes, policy = spec + except TypeError: + # not a tuple at all + entry_point = spec + inputtypes = policy = None except ValueError: + policy = None entry_point, inputtypes = spec - policy = None + driver.setup(entry_point, inputtypes, policy=policy, diff --git a/rpython/translator/goal/targetnopstandalone.py b/rpython/translator/goal/targetnopstandalone.py --- a/rpython/translator/goal/targetnopstandalone.py +++ b/rpython/translator/goal/targetnopstandalone.py @@ -19,4 +19,4 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point diff --git a/rpython/translator/goal/targetrpystonedalone.py b/rpython/translator/goal/targetrpystonedalone.py --- a/rpython/translator/goal/targetrpystonedalone.py +++ b/rpython/translator/goal/targetrpystonedalone.py @@ -60,13 +60,12 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point """ Why is this a stand-alone target? -The above target specifies None as the argument types list. -This is a case treated specially in the driver.py . If the list -of input types is empty, it is meant to be a list of strings, -actually implementing argv of the executable. +The above target specifies no argument types list. +This is a case treated specially in the driver.py . The only argument is meant +to be a list of strings, actually implementing argv of the executable. """ From noreply at buildbot.pypy.org Wed Jul 31 14:12:37 2013 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 31 Jul 2013 14:12:37 +0200 (CEST) Subject: [pypy-commit] pypy default: similarly, make specifying a jitpolicy in the target optional: apart from PyPy, Message-ID: <20130731121237.9A5281C0130@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r65863:8cf2c459003e Date: 2013-07-31 14:12 +0200 http://bitbucket.org/pypy/pypy/changeset/8cf2c459003e/ Log: similarly, make specifying a jitpolicy in the target optional: apart from PyPy, almost all targets use the default policy. 
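With this changeset a small JIT target no longer needs to define its own jitpolicy() hook; a hedged before/after sketch (the "before" is the boilerplate removed from the targets in the diff that follows):

    # before: each JIT target had to spell out the default policy
    from rpython.jit.codewriter.policy import JitPolicy

    def jitpolicy(driver):
        return JitPolicy()

    # after: the hook can simply be omitted; driver.py now falls back to
    # JitPolicy() when the target does not provide a jitpolicy callable.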
diff --git a/rpython/jit/tl/targettlc.py b/rpython/jit/tl/targettlc.py --- a/rpython/jit/tl/targettlc.py +++ b/rpython/jit/tl/targettlc.py @@ -2,7 +2,6 @@ import py py.path.local(__file__) from rpython.jit.tl.tlc import interp, interp_nonjit, ConstantPool -from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.backend.hlinfo import highleveljitinfo @@ -54,14 +53,10 @@ return decode_program(f.readall()) def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -def jitpolicy(driver): - """Returns the JIT policy to use when translating.""" - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/targettlr.py b/rpython/jit/tl/targettlr.py --- a/rpython/jit/tl/targettlr.py +++ b/rpython/jit/tl/targettlr.py @@ -29,15 +29,10 @@ return bytecode def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -from rpython.jit.codewriter.policy import JitPolicy - -def jitpolicy(driver): - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py --- a/rpython/jit/tl/tla/targettla.py +++ b/rpython/jit/tl/tla/targettla.py @@ -28,9 +28,6 @@ def target(driver, args): return entry_point, None -def jitpolicy(driver): - from rpython.jit.codewriter.policy import JitPolicy - return JitPolicy() # ____________________________________________________________ diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -354,8 +354,12 @@ """ Generate bytecodes for JIT and flow the JIT helper functions lltype version """ - get_policy = self.extra['jitpolicy'] - self.jitpolicy = get_policy(self) + from rpython.jit.codewriter.policy import JitPolicy + get_policy = self.extra.get('jitpolicy', None) + if get_policy is None: + self.jitpolicy = JitPolicy() + else: + self.jitpolicy = get_policy(self) # from rpython.jit.metainterp.warmspot import apply_jit apply_jit(self.translator, policy=self.jitpolicy, diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py --- a/rpython/translator/goal/targetjitstandalone.py +++ b/rpython/translator/goal/targetjitstandalone.py @@ -3,7 +3,6 @@ """ from rpython.rlib import jit -from rpython.jit.codewriter.policy import JitPolicy driver = jit.JitDriver(greens = [], reds = 'auto') driver2 = jit.JitDriver(greens = [], reds = 'auto') @@ -40,7 +39,4 @@ return 0 def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() + return entry_point diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -284,8 +284,6 @@ default_goal='compile') log_config(translateconfig, "translate.py configuration") if config.translation.jit: - if 'jitpolicy' not in targetspec_dic: - raise Exception('target has no jitpolicy defined.') if (translateconfig.goals != ['annotate'] and translateconfig.goals != ['rtype']): drv.set_extra_goals(['pyjitpl']) From noreply at buildbot.pypy.org Wed Jul 31 15:22:28 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 15:22:28 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: add jump()s Message-ID: 
<20130731132228.DFDB11C0130@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65864:ea39b8c3bddc Date: 2013-07-31 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/ea39b8c3bddc/ Log: add jump()s diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -96,6 +96,7 @@ p4 = getfield_gc(p1, descr=tzdescr) setfield_gc(p2, p0, descr=tzdescr) p5 = getfield_gc(p1, descr=tzdescr) + jump() """, """ [p0] p1 = same_as(p0) @@ -106,6 +107,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=P2Rdescr) p5 = getfield_gc(p1, descr=tzdescr) + jump() """) def test_invalidate_read_status_after_write_to_field(self): @@ -116,6 +118,7 @@ p3 = getfield_gc(p1, descr=tzdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) + jump() """, """ [p0] cond_call_stm_b(p0, descr=P2Rdescr) @@ -127,6 +130,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=P2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) + jump() """) def test_invalidate_read_status_after_write_to_array(self): @@ -137,6 +141,7 @@ p3 = getfield_gc(p1, descr=tzdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) + jump() """, """ [p0, i1, i2] cond_call_stm_b(p0, descr=P2Rdescr) @@ -148,6 +153,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=P2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) + jump() """) def test_rewrite_write_barrier_after_malloc(self): From noreply at buildbot.pypy.org Wed Jul 31 15:22:30 2013 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 31 Jul 2013 15:22:30 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c4: fix and expand test Message-ID: <20130731132230.8528F1C0359@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r65865:28cb2c7bb151 Date: 2013-07-31 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/28cb2c7bb151/ Log: fix and expand test diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -133,28 +133,33 @@ jump() """) - def test_invalidate_read_status_after_write_to_array(self): - self.check_rewrite(""" + def test_invalidate_read_status_after_write_array_interior(self): + ops = ['getarrayitem_gc', 'getinteriorfield_gc'] + original = """ [p0, i1, i2] - p1 = getarrayitem_gc(p0, i1, descr=adescr) - p2 = getarrayitem_gc(p0, i2, descr=adescr) + p1 = %s(p0, i1, descr=adescr) + p2 = %s(p0, i2, descr=adescr) p3 = getfield_gc(p1, descr=tzdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) jump() - """, """ + """ + rewritten = """ [p0, i1, i2] cond_call_stm_b(p0, descr=P2Rdescr) - p1 = getarrayitem_gc(p0, i1, descr=adescr) - p2 = getarrayitem_gc(p0, i2, descr=adescr) + p1 = %s(p0, i1, descr=adescr) + p2 = %s(p0, i2, descr=adescr) cond_call_stm_b(p1, descr=P2Rdescr) p3 = getfield_gc(p1, descr=tzdescr) - cond_call_stm_b(p2, descr=P2Rdescr) + cond_call_stm_b(p2, descr=P2Wdescr) setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=P2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) jump() - """) + """ + for op in ops: + self.check_rewrite(original % (op, op), + rewritten % (op, op)) def test_rewrite_write_barrier_after_malloc(self): self.check_rewrite(""" From noreply at buildbot.pypy.org Wed Jul 31 
22:22:14 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 31 Jul 2013 22:22:14 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130731202214.3794B1C366F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r65866:a8ec1d207404 Date: 2013-07-31 22:19 +0200 http://bitbucket.org/pypy/pypy/changeset/a8ec1d207404/ Log: hg merge default diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -966,7 +966,7 @@ r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") if lib.color_content(color, r, g, b) == lib.ERR: raise error("Argument 1 was out of range. Check value of COLORS.") - return (r, g, b) + return (r[0], g[0], b[0]) def color_pair(n): @@ -1121,6 +1121,7 @@ term = ffi.NULL err = ffi.new("int *") if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] if err == 0: raise error("setupterm: could not find terminal") elif err == -1: diff --git a/pypy/doc/release-2.1.0.rst b/pypy/doc/release-2.1.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.1.0.rst @@ -0,0 +1,89 @@ +============================ +PyPy 2.1 - Considered ARMful +============================ + +We're pleased to announce PyPy 2.1, which targets version 2.7.3 of the Python +language. This is the first release with official support for ARM processors in the JIT. +This release also contains several bugfixes and performance improvements. + +You can download the PyPy 2.1 release here: + + http://pypy.org/download.html + +We would like to thank the `Raspberry Pi Foundation`_ for supporting the work +to finish PyPy's ARM support. + +.. _`Raspberry Pi Foundation`: http://www.raspberrypi.org + +The first beta of PyPy3 2.1, targetting version 3 of the Python language, was +just released, more details can be found `here`_. + +.. _`here`: http://morepypy.blogspot.com/2013/07/pypy3-21-beta-1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.1 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or Windows +32. This release also supports ARM machines running Linux 32bit - anything with +``ARMv6`` (like the Raspberry Pi) or ``ARMv7`` (like the Beagleboard, +Chromebook, Cubieboard, etc.) that supports ``VFPv3`` should work. Both +hard-float ``armhf/gnueabihf`` and soft-float ``armel/gnueabi`` builds are +provided. ``armhf`` builds for Raspbian are created using the Raspberry Pi +`custom cross-compilation toolchain `_ +based on ``gcc-arm-linux-gnueabihf`` and should work on ``ARMv6`` and +``ARMv7`` devices running Debian or Raspbian. ``armel`` builds are built +using the ``gcc-arm-linux-gnuebi`` toolchain provided by Ubuntu and +currently target ``ARMv7``. + +Windows 64 work is still stalling, we would welcome a volunteer +to handle that. + +.. 
_`pypy 2.1 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +* JIT support for ARM, architecture versions 6 and 7, hard- and soft-float ABI + +* Stacklet support for ARM + +* Support for os.statvfs and os.fstatvfs on unix systems + +* Improved logging performance + +* Faster sets for objects + +* Interpreter improvements + +* During packaging, compile the CFFI based TK extension + +* Pickling of numpy arrays and dtypes + +* Subarrays for numpy + +* Bugfixes to numpy + +* Bugfixes to cffi and ctypes + +* Bugfixes to the x86 stacklet support + +* Fixed issue `1533`_: fix an RPython-level OverflowError for space.float_w(w_big_long_number). + +* Fixed issue `1552`_: GreenletExit should inherit from BaseException. + +* Fixed issue `1537`_: numpypy __array_interface__ + +* Fixed issue `1238`_: Writing to an SSL socket in PyPy sometimes failed with a "bad write retry" message. + +.. _`1533`: https://bugs.pypy.org/issue1533 +.. _`1552`: https://bugs.pypy.org/issue1552 +.. _`1537`: https://bugs.pypy.org/issue1537 +.. _`1238`: https://bugs.pypy.org/issue1238 + +Cheers, + +David Schneider for the PyPy team. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,9 @@ .. branch: fast-slowpath Added an abstraction for functions with a fast and slow path in the JIT. This speeds up list.append() and list.pop(). + +.. branch: curses_fixes + +.. branch: foldable-getarrayitem-indexerror +Constant-fold reading out of constant tuples in PyPy. + diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" 0 def test_ndmin(self): from numpypy import array diff --git a/pypy/module/micronumpy/tool/numready/main.py b/pypy/module/micronumpy/tool/numready/main.py --- a/pypy/module/micronumpy/tool/numready/main.py +++ b/pypy/module/micronumpy/tool/numready/main.py @@ -78,6 +78,11 @@ items.add(Item(name, kind, subitems)) return items +def get_version_str(python): + args = [python, '-c', 'import sys; print sys.version'] + lines = subprocess.check_output(args).splitlines() + return lines[0] + def split(lst): SPLIT = 5 lgt = len(lst) // SPLIT + 1 @@ -93,6 +98,7 @@ def main(argv): cpy_items = find_numpy_items("/usr/bin/python") pypy_items = find_numpy_items(argv[1], "numpypy") + ver = get_version_str(argv[1]) all_items = [] msg = "{:d}/{:d} names".format(len(pypy_items), len(cpy_items)) + " " @@ -113,7 +119,8 @@ env = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)) ) - html = env.get_template("page.html").render(all_items=split(sorted(all_items)), msg=msg) + html = env.get_template("page.html").render(all_items=split(sorted(all_items)), + msg=msg, ver=ver) if len(argv) > 2: with open(argv[2], 'w') as f: f.write(html.encode("utf-8")) diff --git a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -34,6 +34,7 @@

 NumPyPy Status
+Version: {{ ver }}
 Overall: {{ msg }}
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -209,6 +209,22 @@ opnames = log.opnames(loop.allops()) assert opnames.count('new_with_vtable') == 0 + def test_constfold_tuple(self): + code = """if 1: + tup = tuple(range(10000)) + l = [1, 2, 3, 4, 5, 6, "a"] + def main(n): + while n > 0: + sub = tup[1] # ID: getitem + l[1] = n # kill cache of tup[1] + n -= sub + """ + log = self.run(code, [1000]) + loop, = log.loops_by_filename(self.filepath) + ops = loop.ops_by_id('getitem', include_guard_not_invalidated=False) + assert log.opnames(ops) == [] + + def test_specialised_tuple(self): def main(n): import pypyjit diff --git a/pypy/module/test_lib_pypy/test_curses.py b/pypy/module/test_lib_pypy/test_curses.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_curses.py @@ -0,0 +1,48 @@ +from lib_pypy import _curses + +import pytest + +lib = _curses.lib + + +def test_color_content(monkeypatch): + def lib_color_content(color, r, g, b): + r[0], g[0], b[0] = 42, 43, 44 + return lib.OK + + monkeypatch.setattr(_curses, '_ensure_initialised_color', lambda: None) + monkeypatch.setattr(lib, 'color_content', lib_color_content) + + assert _curses.color_content(None) == (42, 43, 44) + + +def test_setupterm(monkeypatch): + def make_setupterm(err_no): + def lib_setupterm(term, fd, err): + err[0] = err_no + + return lib.ERR + + return lib_setupterm + + monkeypatch.setattr(_curses, '_initialised_setupterm', False) + monkeypatch.setattr(lib, 'setupterm', make_setupterm(0)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminal" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(-1)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "could not find terminfo database" in exc_info.value.args[0] + + monkeypatch.setattr(lib, 'setupterm', make_setupterm(42)) + + with pytest.raises(Exception) as exc_info: + _curses.setupterm() + + assert "unknown error" in exc_info.value.args[0] diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -472,6 +472,14 @@ return space.wrap(1) def coerce(space, w_obj1, w_obj2): + w_res = space.try_coerce(w_obj1, w_obj2) + if w_res is None: + raise OperationError(space.w_TypeError, + space.wrap("coercion failed")) + return w_res + + def try_coerce(space, w_obj1, w_obj2): + """Returns a wrapped 2-tuple or a real None if it failed.""" w_typ1 = space.type(w_obj1) w_typ2 = space.type(w_obj2) w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, '__coerce__') @@ -488,8 +496,7 @@ if w_res is None or space.is_w(w_res, space.w_None): w_res = _invoke_binop(space, w_right_impl, w_obj2, w_obj1) if w_res is None or space.is_w(w_res, space.w_None): - raise OperationError(space.w_TypeError, - space.wrap("coercion failed")) + return None if (not space.isinstance_w(w_res, space.w_tuple) or space.len_w(w_res) != 2): raise OperationError(space.w_TypeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -702,11 +702,13 @@ find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): - sizehint = -1 def __init__(self, space): self.space = space 
+ def get_sizehint(self): + return -1 + def init_from_list_w(self, w_list, list_w): raise NotImplementedError @@ -894,7 +896,7 @@ else: strategy = self.space.fromcache(ObjectListStrategy) - storage = strategy.get_empty_storage(self.sizehint) + storage = strategy.get_empty_storage(self.get_sizehint()) w_list.strategy = strategy w_list.lstorage = storage @@ -974,6 +976,9 @@ self.sizehint = sizehint ListStrategy.__init__(self, space) + def get_sizehint(self): + return self.sizehint + def _resize_hint(self, w_list, hint): assert hint >= 0 self.sizehint = hint diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -19,6 +19,7 @@ BUILDERS = [ 'own-linux-x86-32', 'own-linux-x86-64', + 'own-linux-armhf', # 'own-macosx-x86-32', # 'pypy-c-app-level-linux-x86-32', # 'pypy-c-app-level-linux-x86-64', diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -88,7 +88,7 @@ else: assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): - assert nos == [9, 10, 47] + assert nos == [0, 1, 47] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -187,10 +187,6 @@ # with Voids removed raise NotImplementedError - def methdescrof(self, SELFTYPE, methname): - # must return a subclass of history.AbstractMethDescr - raise NotImplementedError - def typedescrof(self, TYPE): raise NotImplementedError diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -502,6 +502,7 @@ # 3. raising call and wrong guard_exception # 4. raising call and guard_no_exception # 5. non raising call and guard_exception +# (6. test of a cond_call, always non-raising and guard_no_exception) class BaseCallOperation(test_random.AbstractOperation): def non_raising_func_code(self, builder, r): @@ -648,6 +649,34 @@ builder.guard_op = op builder.loop.operations.append(op) +# 6. 
a conditional call (for now always with no exception raised) +class CondCallOperation(BaseCallOperation): + def produce_into(self, builder, r): + fail_subset = builder.subset_of_intvars(r) + v_cond = builder.get_bool_var(r) + subset = builder.subset_of_intvars(r)[:4] + for i in range(len(subset)): + if r.random() < 0.35: + subset[i] = ConstInt(r.random_integer()) + # + seen = [] + def call_me(*args): + if len(seen) == 0: + seen.append(args) + else: + assert seen[0] == args + # + TP = lltype.FuncType([lltype.Signed] * len(subset), lltype.Void) + ptr = llhelper(lltype.Ptr(TP), call_me) + c_addr = ConstAddr(llmemory.cast_ptr_to_adr(ptr), builder.cpu) + args = [v_cond, c_addr] + subset + descr = self.getcalldescr(builder, TP) + self.put(builder, args, descr) + op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None, + descr=builder.getfaildescr()) + op.setfailargs(fail_subset) + builder.loop.operations.append(op) + # ____________________________________________________________ OPERATIONS = test_random.OPERATIONS[:] @@ -684,6 +713,7 @@ OPERATIONS.append(RaisingCallOperationGuardNoException(rop.CALL)) OPERATIONS.append(RaisingCallOperationWrongGuardException(rop.CALL)) OPERATIONS.append(CallOperationException(rop.CALL)) + OPERATIONS.append(CondCallOperation(rop.COND_CALL)) OPERATIONS.append(GuardNonNullClassOperation(rop.GUARD_NONNULL_CLASS)) LLtypeOperationBuilder.OPERATIONS = OPERATIONS diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -14,7 +14,7 @@ from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, - gpr_reg_mgr_cls, xmm_reg_mgr_cls, _register_arguments) + gpr_reg_mgr_cls, xmm_reg_mgr_cls) from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) from rpython.jit.backend.x86.arch import (FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, @@ -154,17 +154,24 @@ come. """ mc = codebuf.MachineCodeBlockWrapper() - self._push_all_regs_to_frame(mc, [], supports_floats, callee_only) + # copy registers to the frame, with the exception of the + # 'cond_call_register_arguments' and eax, because these have already + # been saved by the caller. Note that this is not symmetrical: + # these 5 registers are saved by the caller but restored here at + # the end of this function. 
+ self._push_all_regs_to_frame(mc, cond_call_register_arguments + [eax], + supports_floats, callee_only) if IS_X86_64: - mc.SUB(esp, imm(WORD)) + mc.SUB(esp, imm(WORD)) # alignment self.set_extra_stack_depth(mc, 2 * WORD) + # the arguments are already in the correct registers else: - # we want space for 3 arguments + call + alignment - # the caller is responsible for putting arguments in the right spot + # we want space for 4 arguments + call + alignment mc.SUB(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 8 * WORD) + # store the arguments at the correct place in the stack for i in range(4): - mc.MOV_sr(i * WORD, _register_arguments[i].value) + mc.MOV_sr(i * WORD, cond_call_register_arguments[i].value) mc.CALL(eax) if IS_X86_64: mc.ADD(esp, imm(WORD)) @@ -172,8 +179,8 @@ mc.ADD(esp, imm(WORD * 7)) self.set_extra_stack_depth(mc, 0) self._reload_frame_if_necessary(mc, align_stack=True) - self._pop_all_regs_from_frame(mc, [], supports_floats, - callee_only) + self._pop_all_regs_from_frame(mc, [], supports_floats, callee_only) + self.pop_gcmap(mc) # push_gcmap(store=True) done by the caller mc.RET() return mc.materialize(self.cpu.asmmemmgr, []) @@ -1755,7 +1762,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_br(v * WORD + base_ofs, gpr.value) @@ -1777,7 +1784,7 @@ regs = gpr_reg_mgr_cls.save_around_call_regs else: regs = gpr_reg_mgr_cls.all_regs - for i, gpr in enumerate(regs): + for gpr in regs: if gpr not in ignored_regs: v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] mc.MOV_rb(gpr.value, v * WORD + base_ofs) @@ -2161,11 +2168,33 @@ def label(self): self._check_frame_depth_debug(self.mc) - def cond_call(self, op, gcmap, cond_loc, call_loc): - self.mc.TEST(cond_loc, cond_loc) + def cond_call(self, op, gcmap, loc_cond, imm_func, arglocs): + self.mc.TEST(loc_cond, loc_cond) self.mc.J_il8(rx86.Conditions['Z'], 0) # patched later jmp_adr = self.mc.get_relative_pos() + # self.push_gcmap(self.mc, gcmap, store=True) + # + # first save away the 4 registers from 'cond_call_register_arguments' + # plus the register 'eax' + base_ofs = self.cpu.get_baseofs_of_frame_field() + should_be_saved = self._regalloc.rm.reg_bindings.values() + for gpr in cond_call_register_arguments + [eax]: + if gpr not in should_be_saved: + continue + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + self.mc.MOV_br(v * WORD + base_ofs, gpr.value) + # + # load the 0-to-4 arguments into these registers + from rpython.jit.backend.x86.jump import remap_frame_layout + remap_frame_layout(self, arglocs, + cond_call_register_arguments[:len(arglocs)], + X86_64_SCRATCH_REG if IS_X86_64 else None) + # + # load the constant address of the function to call into eax + self.mc.MOV(eax, imm_func) + # + # figure out which variant of cond_call_slowpath to call, and call it callee_only = False floats = False if self._regalloc is not None: @@ -2178,11 +2207,13 @@ floats = True cond_call_adr = self.cond_call_slowpath[floats * 2 + callee_only] self.mc.CALL(imm(cond_call_adr)) - self.pop_gcmap(self.mc) - # never any result value + # restoring the registers saved above, and doing pop_gcmap(), is left + # to the cond_call_slowpath helper. We never have any result value. 
offset = self.mc.get_relative_pos() - jmp_adr assert 0 < offset <= 127 self.mc.overwrite(jmp_adr-1, chr(offset)) + # XXX if the next operation is a GUARD_NO_EXCEPTION, we should + # somehow jump over it too in the fast path def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, gcmap): assert size & (WORD-1) == 0 # must be correctly aligned @@ -2348,5 +2379,7 @@ os.write(2, '[x86/asm] %s\n' % msg) raise NotImplementedError(msg) +cond_call_register_arguments = [edi, esi, edx, ecx] + class BridgeAlreadyCompiled(Exception): pass diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -119,8 +119,6 @@ for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs): gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i -_register_arguments = [edi, esi, edx, ecx] - class RegAlloc(BaseRegalloc): @@ -801,23 +799,26 @@ consider_cond_call_gc_wb_array = consider_cond_call_gc_wb def consider_cond_call(self, op): + # A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments' + # contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA. + # We must make sure that edi and esi do not contain GC pointers. + if IS_X86_32 and self.assembler._is_asmgcc(): + for box, loc in self.rm.reg_bindings.items(): + if (loc == edi or loc == esi) and box.type == REF: + self.rm.force_spill_var(box) + assert box not in self.rm.reg_bindings + # assert op.result is None args = op.getarglist() - assert 2 <= len(args) <= 4 + 2 - tmpbox = TempBox() - self.rm.force_allocate_reg(tmpbox, selected_reg=eax) + assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments + loc_cond = self.make_sure_var_in_reg(args[0], args) v = args[1] assert isinstance(v, Const) - imm = self.rm.convert_to_imm(v) - self.assembler.regalloc_mov(imm, eax) - args_so_far = [tmpbox] - for i in range(2, len(args)): - reg = _register_arguments[i - 2] - self.make_sure_var_in_reg(args[i], args_so_far, selected_reg=reg) - args_so_far.append(args[i]) - loc_cond = self.make_sure_var_in_reg(args[0], args) - self.assembler.cond_call(op, self.get_gcmap([eax]), loc_cond, eax) - self.rm.possibly_free_var(tmpbox) + imm_func = self.rm.convert_to_imm(v) + arglocs = [self.loc(args[i]) for i in range(2, len(args))] + gcmap = self.get_gcmap() + self.rm.possibly_free_var(args[0]) + self.assembler.cond_call(op, gcmap, loc_cond, imm_func, arglocs) def consider_call_malloc_nursery(self, op): size_box = op.getarg(0) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -34,7 +34,6 @@ return 'int' # singlefloats are stored in an int if TYPE in (lltype.Float, lltype.SingleFloat): raise NotImplementedError("type %s not supported" % TYPE) - # XXX fix this for oo... 
if (TYPE != llmemory.Address and rffi.sizeof(TYPE) > rffi.sizeof(lltype.Signed)): if supports_longlong and TYPE is not lltype.LongFloat: @@ -168,18 +167,11 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + class BasicFailDescr(AbstractFailDescr): def __init__(self, identifier=None): self.identifier = identifier # for testing -class AbstractMethDescr(AbstractDescr): - # the base class of the result of cpu.methdescrof() - jitcodes = None - def setup(self, jitcodes): - # jitcodes maps { runtimeClass -> jitcode for runtimeClass.methname } - self.jitcodes = jitcodes - def get_jitcode_for_class(self, oocls): - return self.jitcodes[oocls] class Const(AbstractValue): __slots__ = () diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -195,11 +195,10 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fieldstate) > value.getlength(): + raise BadVirtualState for i in range(len(self.fieldstate)): - try: - v = value.get_item_value(i) - except IndexError: - raise BadVirtualState + v = value.get_item_value(i) s = self.fieldstate[i] if s.position > self.position: s.enum_forced_boxes(boxes, v, optimizer) @@ -269,13 +268,13 @@ raise BadVirtualState if not value.is_virtual(): raise BadVirtualState + if len(self.fielddescrs) > len(value._items): + raise BadVirtualState p = 0 for i in range(len(self.fielddescrs)): for j in range(len(self.fielddescrs[i])): try: v = value._items[i][self.fielddescrs[i][j]] - except IndexError: - raise BadVirtualState except KeyError: raise BadVirtualState s = self.fieldstate[p] diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -69,6 +69,28 @@ self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, getarrayitem_gc=0, getarrayitem_gc_pure=1) + def test_array_index_error(self): + class X(object): + _immutable_fields_ = ["y[*]"] + + def __init__(self, x): + self.y = x + + def get(self, index): + try: + return self.y[index] + except IndexError: + return -41 + + def f(index): + l = [1, 2, 3, 4] + l[2] = 30 + a = escape(X(l)) + return a.get(index) + res = self.interp_operations(f, [2], listops=True) + assert res == 30 + self.check_operations_history(getfield_gc=0, getfield_gc_pure=1, + getarrayitem_gc=0, getarrayitem_gc_pure=1) def test_array_in_immutable(self): class X(object): diff --git a/rpython/jit/tl/targettlc.py b/rpython/jit/tl/targettlc.py --- a/rpython/jit/tl/targettlc.py +++ b/rpython/jit/tl/targettlc.py @@ -2,7 +2,6 @@ import py py.path.local(__file__) from rpython.jit.tl.tlc import interp, interp_nonjit, ConstantPool -from rpython.jit.codewriter.policy import JitPolicy from rpython.jit.backend.hlinfo import highleveljitinfo @@ -54,14 +53,10 @@ return decode_program(f.readall()) def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -def jitpolicy(driver): - """Returns the JIT policy to use when translating.""" - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/targettlr.py b/rpython/jit/tl/targettlr.py --- a/rpython/jit/tl/targettlr.py +++ b/rpython/jit/tl/targettlr.py @@ -29,15 +29,10 @@ return bytecode 
def target(driver, args): - return entry_point, None + return entry_point # ____________________________________________________________ -from rpython.jit.codewriter.policy import JitPolicy - -def jitpolicy(driver): - return JitPolicy() - if __name__ == '__main__': import sys sys.exit(entry_point(sys.argv)) diff --git a/rpython/jit/tl/tla/targettla.py b/rpython/jit/tl/tla/targettla.py --- a/rpython/jit/tl/tla/targettla.py +++ b/rpython/jit/tl/tla/targettla.py @@ -28,9 +28,6 @@ def target(driver, args): return entry_point, None -def jitpolicy(driver): - from rpython.jit.codewriter.policy import JitPolicy - return JitPolicy() # ____________________________________________________________ diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -729,6 +729,10 @@ # - frame address (actually the addr of the retaddr of the current function; # that's the last word of the frame in memory) # +# On 64 bits, it is an array of 7 values instead of 5: +# +# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address +# if IS_64_BITS: CALLEE_SAVED_REGS = 6 diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -68,14 +68,12 @@ uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__uint2singlefloat") + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - oo_primitive="pypy__singlefloat2uint") + _nowrapper=True, elidable_function=True, sandboxsafe=True) class Float2LongLongEntry(ExtRegistryEntry): diff --git a/rpython/rlib/rlocale.py b/rpython/rlib/rlocale.py --- a/rpython/rlib/rlocale.py +++ b/rpython/rlib/rlocale.py @@ -193,11 +193,11 @@ raise LocaleError("unsupported locale setting") return rffi.charp2str(ll_result) -isalpha = external('isalpha', [rffi.INT], rffi.INT, oo_primitive='locale_isalpha') -isupper = external('isupper', [rffi.INT], rffi.INT, oo_primitive='locale_isupper') -islower = external('islower', [rffi.INT], rffi.INT, oo_primitive='locale_islower') -tolower = external('tolower', [rffi.INT], rffi.INT, oo_primitive='locale_tolower') -isalnum = external('isalnum', [rffi.INT], rffi.INT, oo_primitive='locale_isalnum') +isalpha = external('isalpha', [rffi.INT], rffi.INT) +isupper = external('isupper', [rffi.INT], rffi.INT) +islower = external('islower', [rffi.INT], rffi.INT) +tolower = external('tolower', [rffi.INT], rffi.INT) +isalnum = external('isalnum', [rffi.INT], rffi.INT) if HAVE_LANGINFO: _nl_langinfo = external('nl_langinfo', [rffi.INT], rffi.CCHARP) diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -185,11 +185,8 @@ SetEndOfFile = rffi.llexternal('SetEndOfFile', [HANDLE], BOOL, compilation_info=_eci) - # HACK: These implementations are specific to MSVCRT and the C backend. - # When generating on CLI or JVM, these are patched out. 
- # See PyPyTarget.target() in targetpypystandalone.py def _setfd_binary(fd): - #Allow this to succeed on invalid fd's + # Allow this to succeed on invalid fd's if rposix.is_valid_fd(fd): _setmode(fd, os.O_BINARY) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -62,8 +62,8 @@ compilation_info=ExternalCompilationInfo(), sandboxsafe=False, threadsafe='auto', _nowrapper=False, calling_conv='c', - oo_primitive=None, elidable_function=False, - macro=None, random_effects_on_gcobjs='auto'): + elidable_function=False, macro=None, + random_effects_on_gcobjs='auto'): """Build an external function that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -97,8 +97,6 @@ if elidable_function: _callable._elidable_function_ = True kwds = {} - if oo_primitive: - kwds['oo_primitive'] = oo_primitive has_callback = False for ARG in args: @@ -651,6 +649,10 @@ # char * CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True})) +# const char * +CONST_CCHARP = lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True, + 'render_as_const': True})) + # wchar_t * CWCHARP = lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True})) diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -247,27 +247,22 @@ v_lst, v_index = hop.inputargs(r_lst, Signed) if checkidx: hop.exception_is_here() + spec = dum_checkidx else: + spec = dum_nocheck hop.exception_cannot_occur() - if hop.args_s[0].listdef.listitem.mutated or checkidx: - if hop.args_s[1].nonneg: - llfn = ll_getitem_nonneg - else: - llfn = ll_getitem - if checkidx: - spec = dum_checkidx - else: - spec = dum_nocheck - c_func_marker = hop.inputconst(Void, spec) - v_res = hop.gendirectcall(llfn, c_func_marker, v_lst, v_index) + if hop.args_s[0].listdef.listitem.mutated: + basegetitem = ll_getitem_fast else: - # this is the 'foldable' version, which is not used when - # we check for IndexError - if hop.args_s[1].nonneg: - llfn = ll_getitem_foldable_nonneg - else: - llfn = ll_getitem_foldable - v_res = hop.gendirectcall(llfn, v_lst, v_index) + basegetitem = ll_getitem_foldable_nonneg + + if hop.args_s[1].nonneg: + llfn = ll_getitem_nonneg + else: + llfn = ll_getitem + c_func_marker = hop.inputconst(Void, spec) + c_basegetitem = hop.inputconst(Void, basegetitem) + v_res = hop.gendirectcall(llfn, c_func_marker, c_basegetitem, v_lst, v_index) return r_lst.recast(hop.llops, v_res) rtype_getitem_key = rtype_getitem @@ -654,16 +649,16 @@ i += 1 length_1_i -= 1 -def ll_getitem_nonneg(func, l, index): +def ll_getitem_nonneg(func, basegetitem, l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") if func is dum_checkidx: if index >= l.ll_length(): raise IndexError - return l.ll_getitem_fast(index) + return basegetitem(l, index) ll_getitem_nonneg._always_inline_ = True # no oopspec -- the function is inlined by the JIT -def ll_getitem(func, l, index): +def ll_getitem(func, basegetitem, l, index): if func is dum_checkidx: length = l.ll_length() # common case: 0 <= index < length if r_uint(index) >= r_uint(length): @@ -680,21 +675,18 @@ if index < 0: index += l.ll_length() ll_assert(index >= 0, "negative list getitem index out of bound") + return basegetitem(l, index) +# no oopspec -- the function is inlined by the JIT + +def ll_getitem_fast(l, index): return l.ll_getitem_fast(index) -# no oopspec -- the function 
is inlined by the JIT +ll_getitem_fast._always_inline_ = True def ll_getitem_foldable_nonneg(l, index): ll_assert(index >= 0, "unexpectedly negative list getitem index") return l.ll_getitem_fast(index) ll_getitem_foldable_nonneg.oopspec = 'list.getitem_foldable(l, index)' -def ll_getitem_foldable(l, index): - if index < 0: - index += l.ll_length() - return ll_getitem_foldable_nonneg(l, index) -ll_getitem_foldable._always_inline_ = True -# no oopspec -- the function is inlined by the JIT - def ll_setitem_nonneg(func, l, index, newitem): ll_assert(index >= 0, "unexpectedly negative list setitem index") if func is dum_checkidx: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -58,7 +58,6 @@ self.classdef_to_pytypeobject = {} self.concrete_calltables = {} self.class_pbc_attributes = {} - self.oo_meth_impls = {} self.cache_dummy_values = {} self.lltype2vtable = {} self.typererrors = [] diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -14,15 +14,19 @@ from rpython.translator.translator import TranslationContext -# undo the specialization parameter +# undo the specialization parameters for n1 in 'get set del'.split(): + if n1 == "get": + extraarg = "ll_getitem_fast, " + else: + extraarg = "" for n2 in '', '_nonneg': name = 'll_%sitem%s' % (n1, n2) globals()['_' + name] = globals()[name] exec """if 1: def %s(*args): - return _%s(dum_checkidx, *args) -""" % (name, name) + return _%s(dum_checkidx, %s*args) +""" % (name, name, extraarg) del n1, n2, name @@ -1400,7 +1404,7 @@ block = graph.startblock op = block.operations[-1] assert op.opname == 'direct_call' - func = op.args[0].value._obj._callable + func = op.args[2].value assert ('foldable' in func.func_name) == \ ("y[*]" in immutable_fields) @@ -1511,8 +1515,8 @@ block = graph.startblock lst1_getitem_op = block.operations[-3] # XXX graph fishing lst2_getitem_op = block.operations[-2] - func1 = lst1_getitem_op.args[0].value._obj._callable - func2 = lst2_getitem_op.args[0].value._obj._callable + func1 = lst1_getitem_op.args[2].value + func2 = lst2_getitem_op.args[2].value assert func1.oopspec == 'list.getitem_foldable(l, index)' assert not hasattr(func2, 'oopspec') diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -97,9 +97,8 @@ def test_remove_unaryops(): - # We really want to use remove_unaryops for things like ooupcast and - # oodowncast in dynamically typed languages, but it's easier to test - # it with operations on ints here. + # We really want to use remove_unaryops for more complex operations, but + # it's easier to test it with operations on ints here. 
def f(x): i = llop.int_invert(lltype.Signed, x) i = llop.int_add(lltype.Signed, x, 1) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -358,6 +358,8 @@ self.fullptrtypename = 'void *@' else: self.fullptrtypename = self.itemtypename.replace('@', '*@') + if ARRAY._hints.get("render_as_const"): + self.fullptrtypename = 'const ' + self.fullptrtypename def setup(self): """Array loops are forbidden by ForwardReference.become() because diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -1,5 +1,6 @@ import py from rpython.rtyper.lltypesystem.lltype import * +from rpython.rtyper.lltypesystem import rffi from rpython.translator.c.test.test_genc import compile from rpython.tool.sourcetools import func_with_new_name @@ -314,14 +315,14 @@ from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import rffi, llmemory, lltype P = lltype.Ptr(lltype.FixedSizeArray(lltype.Char, 1)) - + def f(): a = llstr("xyz") b = (llmemory.cast_ptr_to_adr(a) + llmemory.offsetof(STR, 'chars') + llmemory.itemoffsetof(STR.chars, 0)) buf = rffi.cast(rffi.VOIDP, b) return buf[2] - + fn = self.getcompiled(f, []) res = fn() assert res == 'z' @@ -941,3 +942,21 @@ assert fn(0) == 10 assert fn(1) == 10 + 521 assert fn(2) == 10 + 34 + + def test_const_char_star(self): + from rpython.translator.tool.cbuild import ExternalCompilationInfo + + eci = ExternalCompilationInfo(includes=["stdlib.h"]) + atoi = rffi.llexternal('atoi', [rffi.CONST_CCHARP], rffi.INT, + compilation_info=eci) + + def f(n): + s = malloc(rffi.CCHARP.TO, 2, flavor='raw') + s[0] = '9' + s[1] = '\0' + res = atoi(rffi.cast(rffi.CONST_CCHARP, s)) + free(s, flavor='raw') + return res + + fn = self.getcompiled(f, [int]) + assert fn(0) == 9 diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -354,8 +354,12 @@ """ Generate bytecodes for JIT and flow the JIT helper functions lltype version """ - get_policy = self.extra['jitpolicy'] - self.jitpolicy = get_policy(self) + from rpython.jit.codewriter.policy import JitPolicy + get_policy = self.extra.get('jitpolicy', None) + if get_policy is None: + self.jitpolicy = JitPolicy() + else: + self.jitpolicy = get_policy(self) # from rpython.jit.metainterp.warmspot import apply_jit apply_jit(self.translator, policy=self.jitpolicy, @@ -544,9 +548,14 @@ try: entry_point, inputtypes, policy = spec + except TypeError: + # not a tuple at all + entry_point = spec + inputtypes = policy = None except ValueError: + policy = None entry_point, inputtypes = spec - policy = None + driver.setup(entry_point, inputtypes, policy=policy, diff --git a/rpython/translator/goal/targetjitstandalone.py b/rpython/translator/goal/targetjitstandalone.py --- a/rpython/translator/goal/targetjitstandalone.py +++ b/rpython/translator/goal/targetjitstandalone.py @@ -3,7 +3,6 @@ """ from rpython.rlib import jit -from rpython.jit.codewriter.policy import JitPolicy driver = jit.JitDriver(greens = [], reds = 'auto') driver2 = jit.JitDriver(greens = [], reds = 'auto') @@ -28,7 +27,9 @@ def entry_point(argv): if len(argv) < 3: - print "Usage: jitstandalone " + print "Usage: jitstandalone " + print "runs a total of '2 * count1 * count2' iterations" + return 0 count1 = int(argv[1]) count2 = int(argv[2]) s = 0 @@ -38,7 
+39,4 @@ return 0 def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() + return entry_point diff --git a/rpython/translator/goal/targetnopstandalone.py b/rpython/translator/goal/targetnopstandalone.py --- a/rpython/translator/goal/targetnopstandalone.py +++ b/rpython/translator/goal/targetnopstandalone.py @@ -19,4 +19,4 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point diff --git a/rpython/translator/goal/targetrpystonedalone.py b/rpython/translator/goal/targetrpystonedalone.py --- a/rpython/translator/goal/targetrpystonedalone.py +++ b/rpython/translator/goal/targetrpystonedalone.py @@ -60,13 +60,12 @@ # _____ Define and setup target ___ def target(*args): - return entry_point, None + return entry_point """ Why is this a stand-alone target? -The above target specifies None as the argument types list. -This is a case treated specially in the driver.py . If the list -of input types is empty, it is meant to be a list of strings, -actually implementing argv of the executable. +The above target specifies no argument types list. +This is a case treated specially in the driver.py . The only argument is meant +to be a list of strings, actually implementing argv of the executable. """ diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -284,8 +284,6 @@ default_goal='compile') log_config(translateconfig, "translate.py configuration") if config.translation.jit: - if 'jitpolicy' not in targetspec_dic: - raise Exception('target has no jitpolicy defined.') if (translateconfig.goals != ['annotate'] and translateconfig.goals != ['rtype']): drv.set_extra_goals(['pyjitpl'])