From pedronis at openend.se Tue Sep 1 10:40:07 2009 From: pedronis at openend.se (Samuele Pedroni) Date: Tue, 01 Sep 2009 10:40:07 +0200 Subject: [pypy-dev] pyjitpl5 has been merged with trunk, use trunk for mainline jit development Message-ID: <4A9CDDE7.5070407@openend.se> as discussed in the last sprint. regards From pedronis at openend.se Wed Sep 2 20:17:36 2009 From: pedronis at openend.se (Samuele Pedroni) Date: Wed, 02 Sep 2009 20:17:36 +0200 Subject: [pypy-dev] [pypy-svn] r67433 - in pypy/branch/spine-of-frames/pypy: interpreter interpreter/test module/sys module/sys/test In-Reply-To: <20090902155335.AB58F1683D3@codespeak.net> References: <20090902155335.AB58F1683D3@codespeak.net> Message-ID: <4A9EB6C0.9030809@openend.se> this is messy enough that it really need unit tests, we don't have enough other tests to be sure they check what's going on here. cfbolz at codespeak.net wrote: > Author: cfbolz > Date: Wed Sep 2 17:53:35 2009 > New Revision: 67433 > > Modified: > pypy/branch/spine-of-frames/pypy/interpreter/executioncontext.py > pypy/branch/spine-of-frames/pypy/interpreter/generator.py > pypy/branch/spine-of-frames/pypy/interpreter/pyframe.py > pypy/branch/spine-of-frames/pypy/interpreter/pytraceback.py > pypy/branch/spine-of-frames/pypy/interpreter/test/test_zzpickle_and_slow.py > pypy/branch/spine-of-frames/pypy/module/sys/test/test_sysmodule.py > pypy/branch/spine-of-frames/pypy/module/sys/vm.py > Log: > try to have frames not escape via the f_backs of the next "real" frame. > > > Modified: pypy/branch/spine-of-frames/pypy/interpreter/executioncontext.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/interpreter/executioncontext.py (original) > +++ pypy/branch/spine-of-frames/pypy/interpreter/executioncontext.py Wed Sep 2 17:53:35 2009 > @@ -40,13 +40,13 @@ > def gettopframe_nohidden(self): > frame = self.gettopframe() > while frame and frame.hide(): > - frame = frame.f_back > + frame = frame.f_back() > return frame > > def getnextframe_nohidden(frame): > - frame = frame.f_back > + frame = frame.f_back() > while frame and frame.hide(): > - frame = frame.f_back > + frame = frame.f_back() > return frame > getnextframe_nohidden = staticmethod(getnextframe_nohidden) > > @@ -56,8 +56,8 @@ > self.space.wrap("maximum recursion depth exceeded")) > self.framestackdepth += 1 > # > + frame.f_back_some = self.some_frame > curtopframe = self.gettopframe() > - frame.f_back = curtopframe > if curtopframe is not None: > curtopframe.f_forward = frame > if not we_are_jitted(): > @@ -68,11 +68,11 @@ > self._trace(frame, 'leaveframe', self.space.w_None) > > #assert frame is self.gettopframe() --- slowish > - f_back = frame.f_back > + f_back = frame.f_back() > if f_back is not None: > f_back.f_forward = None > if not we_are_jitted() or self.some_frame is frame: > - self.some_frame = f_back > + self.some_frame = frame.f_back_some > self.framestackdepth -= 1 > > if self.w_tracefunc is not None and not frame.hide(): > @@ -134,7 +134,7 @@ > while index > 0: > index -= 1 > lst[index] = f > - f = f.f_back > + f = f.f_back() > assert f is None > return lst > # coroutine: I think this is all, folks! 
> > Modified: pypy/branch/spine-of-frames/pypy/interpreter/generator.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/interpreter/generator.py (original) > +++ pypy/branch/spine-of-frames/pypy/interpreter/generator.py Wed Sep 2 17:53:35 2009 > @@ -64,7 +64,7 @@ > else: > return w_result # YIELDed > finally: > - self.frame.f_back = None > + self.frame.f_back_some = None > self.running = False > > def descr_throw(self, w_type, w_val=None, w_tb=None): > > Modified: pypy/branch/spine-of-frames/pypy/interpreter/pyframe.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/interpreter/pyframe.py (original) > +++ pypy/branch/spine-of-frames/pypy/interpreter/pyframe.py Wed Sep 2 17:53:35 2009 > @@ -33,14 +33,49 @@ > * 'builtin' is the attached built-in module > * 'valuestack_w', 'blockstack', control the interpretation > """ > + > + """ > + explanation of the f_back handling: > + ----------------------------------- > + > + in the non-JIT case, the frames simply form a doubly linked list via the > + attributes f_back_some and f_forward. > + > + When the JIT is used, things become more complex, as functions can be > + inlined into each other. In this case a frame chain can look like this: > + > + +---------------+ > + | real_frame | > + +---------------+ > + | ^ > + | f_back_some > + | | > + | | f_forward > + | +--------------+ > + | | virtual frame| > + | +--------------+ > + | ^ > + | | f_forward > + | +--------------+ > + | | virtual frame| > + | +--------------+ > + | ^ > + | | > + v | f_forward > + +---------------+ > + | real_frame | > + +---------------+ > + > + """ > > __metaclass__ = extendabletype > > frame_finished_execution = False > last_instr = -1 > last_exception = None > - f_back = None # these two should be modified together > - f_forward = None # they make a doubly-linked list > + f_back_some = None # these two should be modified together > + f_forward = None # they make a sort of doubly-linked list > + f_back_forced = False > w_f_trace = None > # For tracing > instr_lb = 0 > @@ -286,7 +321,7 @@ > w_tb = w(self.last_exception.application_traceback) > > tup_state = [ > - w(self.f_back), > + w(self.f_back()), > w(self.get_builtin()), > w(self.pycode), > w_valuestack, > @@ -338,7 +373,9 @@ > # do not use the instance's __init__ but the base's, because we set > # everything like cells from here > PyFrame.__init__(self, space, pycode, w_globals, closure) > - new_frame.f_back = space.interp_w(PyFrame, w_f_back, can_be_None=True) > + new_frame.f_back_some = space.interp_w(PyFrame, w_f_back, can_be_None=True) > + new_frame.f_back_forced = True > + > new_frame.builtin = space.interp_w(Module, w_builtin) > new_frame.blockstack = [unpickle_block(space, w_blk) > for w_blk in space.unpackiterable(w_blockstack)] > @@ -407,6 +444,25 @@ > def _setcellvars(self, cellvars): > pass > > + def f_back(self): > + back_some = self.f_back_some > + if self.f_back_forced: > + # don't check back_some.f_forward in this case > + return back_some > + if back_some is None: > + return None > + while back_some.f_forward is not self: > + back_some = back_some.f_forward > + return back_some > + > + def force_f_back(self): > + self.f_back_some = f_back = self.f_back() > + self.f_back_forced = True > + if f_back is not None: > + f_back.force_f_back() > + return f_back > + > + > ### line numbers ### > > # for f*_f_* unwrapping through unwrap_spec in typedef.py > @@ -550,7 +606,7 
@@ > return self.get_builtin().getdict() > > def fget_f_back(space, self): > - return self.space.wrap(self.f_back) > + return self.space.wrap(self.f_back()) > > def fget_f_lasti(space, self): > return self.space.wrap(self.last_instr) > @@ -571,27 +627,27 @@ > > def fget_f_exc_type(space, self): > if self.last_exception is not None: > - f = self.f_back > + f = self.f_back() > while f is not None and f.last_exception is None: > - f = f.f_back > + f = f.f_back() > if f is not None: > return f.last_exception.w_type > return space.w_None > > def fget_f_exc_value(space, self): > if self.last_exception is not None: > - f = self.f_back > + f = self.f_back() > while f is not None and f.last_exception is None: > - f = f.f_back > + f = f.f_back() > if f is not None: > return f.last_exception.w_value > return space.w_None > > def fget_f_exc_traceback(space, self): > if self.last_exception is not None: > - f = self.f_back > + f = self.f_back() > while f is not None and f.last_exception is None: > - f = f.f_back > + f = f.f_back() > if f is not None: > return space.wrap(f.last_exception.application_traceback) > return space.w_None > > Modified: pypy/branch/spine-of-frames/pypy/interpreter/pytraceback.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/interpreter/pytraceback.py (original) > +++ pypy/branch/spine-of-frames/pypy/interpreter/pytraceback.py Wed Sep 2 17:53:35 2009 > @@ -46,6 +46,7 @@ > self.next = space.interp_w(PyTraceback, w_next, can_be_None=True) > > def record_application_traceback(space, operror, frame, last_instruction): > + frame.force_f_back() > if frame.pycode.hidden_applevel: > return > lineno = offset2lineno(frame.pycode, last_instruction) > > Modified: pypy/branch/spine-of-frames/pypy/interpreter/test/test_zzpickle_and_slow.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/interpreter/test/test_zzpickle_and_slow.py (original) > +++ pypy/branch/spine-of-frames/pypy/interpreter/test/test_zzpickle_and_slow.py Wed Sep 2 17:53:35 2009 > @@ -30,18 +30,23 @@ > from pypy.interpreter import pytraceback > def hide_top_frame(space, w_frame): > w_last = None > - while w_frame.f_back: > + while w_frame.f_back(): > + # should have been forced by traceback capturing > + assert w_frame.f_back_forced > w_last = w_frame > - w_frame = w_frame.f_back > + w_frame = w_frame.f_back() > assert w_last > - w_saved = w_last.f_back > - w_last.f_back = None > + w_saved = w_last.f_back() > + w_last.f_back_some = None > + w_saved.f_forward = None > return w_saved > > def restore_top_frame(space, w_frame, w_saved): > - while w_frame.f_back: > - w_frame = w_frame.f_back > - w_frame.f_back = w_saved > + while w_frame.f_back(): > + assert w_frame.f_back_forced > + w_frame = w_frame.f_back() > + w_frame.f_back_some = w_saved > + w_saved.f_forward = w_frame > > def read_exc_type(space, w_frame): > if w_frame.last_exception is None: > > Modified: pypy/branch/spine-of-frames/pypy/module/sys/test/test_sysmodule.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/module/sys/test/test_sysmodule.py (original) > +++ pypy/branch/spine-of-frames/pypy/module/sys/test/test_sysmodule.py Wed Sep 2 17:53:35 2009 > @@ -337,6 +337,16 @@ > # is sys._getframe().f_code > #) > > + def test_getframe_in_returned_func(self): > + def f(): > + return g() > + def g(): > + return sys._getframe(0) > + frame = f() > + assert 
frame.f_code.co_name == 'g' > + assert frame.f_back.f_code.co_name == 'f' > + assert frame.f_back.f_back.f_code.co_name == 'test_getframe_in_returned_func' > + > def test_attributes(self): > assert sys.__name__ == 'sys' > assert isinstance(sys.modules, dict) > > Modified: pypy/branch/spine-of-frames/pypy/module/sys/vm.py > ============================================================================== > --- pypy/branch/spine-of-frames/pypy/module/sys/vm.py (original) > +++ pypy/branch/spine-of-frames/pypy/module/sys/vm.py Wed Sep 2 17:53:35 2009 > @@ -31,6 +31,7 @@ > space.wrap("frame index must not be negative")) > ec = space.getexecutioncontext() > f = ec.gettopframe_nohidden() > + f.force_f_back() > while True: > if f is None: > raise OperationError(space.w_ValueError, > _______________________________________________ > pypy-svn mailing list > pypy-svn at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-svn > From lac at openend.se Fri Sep 4 13:16:26 2009 From: lac at openend.se (Laura Creighton) Date: Fri, 04 Sep 2009 13:16:26 +0200 Subject: [pypy-dev] Jesse Noller tried to invite us to something and was rudely rejected... Message-ID: <200909041116.n84BGQq7023127@theraft.openend.se> Are we rejecting messages from non-members of the list automatically these days? And if so, is this really necessary? And shouldn't we change the reject message to indicate that? Or is jnoller at gmail.com on some special 'banned from posting' list, and if so, why? Laura ------- Forwarded Message Return-Path: jnoller at gmail.com Delivery-Date: Fri Sep 4 12:20:35 2009 From: Jesse Noller To: Laura Creighton Content-Type: multipart/alternative; Hmm Begin forwarded message: > From: pypy-dev-owner at codespeak.net > Date: September 3, 2009 10:13:08 PM EDT > To: jnoller at python.org > Subject: PyCon 2010: Plenary talk invite > > You are not allowed to post to this mailing list, and your message has > been automatically rejected. If you think that your messages are > being rejected in error, contact the mailing list owner at > pypy-dev-owner at codespeak.net. > > Hello there PyPy! > > I am reaching out to you today as the PyCon 2010 Program Committee > Chair, and on the behalf of the committee as a whole. Part of the > conference is the keynote/plenary talks - these talks range in time > length and are presented to the conference as a whole. > > We're planning out PyCon 2010 - and in discussions, we felt that an > excellent addition to PyCon would be a series of plenary talks (15 > minutes in length) on showcasing the state of the various > implementations of Python. I'd like to extend an invitation to one > of you > to act as the "lead" for PyPy and do one of these for us. > > Python is so much more than one implementation - and I know that > I, and the community as a whole would love to hear anything you had to > offer about your implementation. You can do something as simple as a > state of the union talk, things you would like to see from > python-core, or even encourage people to help contribute to your > project. Anything directly related to your implementation is fair > game. > > If you are interested in presenting, please get back to me as soon > as possible indicating your acceptance. If you do not want to do this, > and possibly have someone else in mind to present for you - by all > means please feel free to send me their contact information. 
> > The PyCon conference days will be February 19-21, 2010 in Atlanta, > Georgia, preceded by the tutorial days (February 17-18), and followed > by four days of development sprints (February 22-25). > > I look forward to hearing from you. If you have any questions, > please feel free to contact me (jnoller at python.org), or you can reach > out to the PyCon Chair - Van Lindberg (van at python.org) > > Jesse Noller > jnoller at python.org > Program Committee Chair ------- End of Forwarded Message From fuzzyman at gmail.com Fri Sep 4 14:41:57 2009 From: fuzzyman at gmail.com (Michael Foord) Date: Fri, 4 Sep 2009 13:41:57 +0100 Subject: [pypy-dev] Jesse Noller tried to invite us to something and was rudely rejected... In-Reply-To: <200909041116.n84BGQq7023127@theraft.openend.se> References: <200909041116.n84BGQq7023127@theraft.openend.se> Message-ID: <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> Most mailing lists I'm on these days ban posts from non-members, otherwise they get overwhelmed with spam unfortunately. :-( Michael 2009/9/4 Laura Creighton > > Are we rejecting messages from non-members of the list automatically > these days? And if so, is this really necessary? And shouldn't we > change the reject message to indicate that? > > Or is jnoller at gmail.com on some special 'banned from posting' list, > and if so, why? > > Laura > > ------- Forwarded Message > > Return-Path: jnoller at gmail.com > Delivery-Date: Fri Sep 4 12:20:35 2009 > From: Jesse Noller > To: Laura Creighton > Content-Type: multipart/alternative; > > Hmm > > > Begin forwarded message: > > > From: pypy-dev-owner at codespeak.net > > Date: September 3, 2009 10:13:08 PM EDT > > To: jnoller at python.org > > Subject: PyCon 2010: Plenary talk invite > > > > > You are not allowed to post to this mailing list, and your message has > > been automatically rejected. If you think that your messages are > > being rejected in error, contact the mailing list owner at > > pypy-dev-owner at codespeak.net. > > > > Hello there PyPy! > > > > I am reaching out to you today as the PyCon 2010 Program Committee > > Chair, and on the behalf of the committee as a whole. Part of the > > conference is the keynote/plenary talks - these talks range in time > > length and are presented to the conference as a whole. > > > > We're planning out PyCon 2010 - and in discussions, we felt that an > > excellent addition to PyCon would be a series of plenary talks (15 > > minutes in length) on showcasing the state of the various > > implementations of Python. I'd like to extend an invitation to one > > of you > > to act as the "lead" for PyPy and do one of these for us. > > > > Python is so much more than one implementation - and I know that > > I, and the community as a whole would love to hear anything you had to > > offer about your implementation. You can do something as simple as a > > state of the union talk, things you would like to see from > > python-core, or even encourage people to help contribute to your > > project. Anything directly related to your implementation is fair > > game. > > > > If you are interested in presenting, please get back to me as soon > > as possible indicating your acceptance. If you do not want to do this, > > and possibly have someone else in mind to present for you - by all > > means please feel free to send me their contact information. 
> > > > The PyCon conference days will be February 19-21, 2010 in Atlanta, > > Georgia, preceded by the tutorial days (February 17-18), and followed > > by four days of development sprints (February 22-25). > > > > I look forward to hearing from you. If you have any questions, > > please feel free to contact me (jnoller at python.org), or you can reach > > out to the PyCon Chair - Van Lindberg (van at python.org) > > > > Jesse Noller > > jnoller at python.org > > Program Committee Chair > > > ------- End of Forwarded Message > > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > -- http://www.ironpythoninaction.com/ -------------- next part -------------- An HTML attachment was scrubbed... URL: From holger at merlinux.eu Fri Sep 4 15:26:49 2009 From: holger at merlinux.eu (holger krekel) Date: Fri, 4 Sep 2009 15:26:49 +0200 Subject: [pypy-dev] Jesse Noller tried to invite us to something and was rudely rejected... In-Reply-To: <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> References: <200909041116.n84BGQq7023127@theraft.openend.se> <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> Message-ID: <20090904132649.GS15455@trillke.net> On Fri, Sep 04, 2009 at 13:41 +0100, Michael Foord wrote: > Most mailing lists I'm on these days ban posts from non-members, otherwise > they get overwhelmed with spam unfortunately. :-( yip, that's the case here as well. holger > 2009/9/4 Laura Creighton > > > > > Are we rejecting messages from non-members of the list automatically > > these days? And if so, is this really necessary? And shouldn't we > > change the reject message to indicate that? > > > > Or is jnoller at gmail.com on some special 'banned from posting' list, > > and if so, why? > > > > Laura > > > > ------- Forwarded Message > > > > Return-Path: jnoller at gmail.com > > Delivery-Date: Fri Sep 4 12:20:35 2009 > > From: Jesse Noller > > To: Laura Creighton > > Content-Type: multipart/alternative; > > > > Hmm > > > > > > Begin forwarded message: > > > > > From: pypy-dev-owner at codespeak.net > > > Date: September 3, 2009 10:13:08 PM EDT > > > To: jnoller at python.org > > > Subject: PyCon 2010: Plenary talk invite > > > > > > > > You are not allowed to post to this mailing list, and your message has > > > been automatically rejected. If you think that your messages are > > > being rejected in error, contact the mailing list owner at > > > pypy-dev-owner at codespeak.net. > > > > > > Hello there PyPy! > > > > > > I am reaching out to you today as the PyCon 2010 Program Committee > > > Chair, and on the behalf of the committee as a whole. Part of the > > > conference is the keynote/plenary talks - these talks range in time > > > length and are presented to the conference as a whole. > > > > > > We're planning out PyCon 2010 - and in discussions, we felt that an > > > excellent addition to PyCon would be a series of plenary talks (15 > > > minutes in length) on showcasing the state of the various > > > implementations of Python. I'd like to extend an invitation to one > > > of you > > > to act as the "lead" for PyPy and do one of these for us. > > > > > > Python is so much more than one implementation - and I know that > > > I, and the community as a whole would love to hear anything you had to > > > offer about your implementation. 
You can do something as simple as a > > > state of the union talk, things you would like to see from > > > python-core, or even encourage people to help contribute to your > > > project. Anything directly related to your implementation is fair > > > game. > > > > > > If you are interested in presenting, please get back to me as soon > > > as possible indicating your acceptance. If you do not want to do this, > > > and possibly have someone else in mind to present for you - by all > > > means please feel free to send me their contact information. > > > > > > The PyCon conference days will be February 19-21, 2010 in Atlanta, > > > Georgia, preceded by the tutorial days (February 17-18), and followed > > > by four days of development sprints (February 22-25). > > > > > > I look forward to hearing from you. If you have any questions, > > > please feel free to contact me (jnoller at python.org), or you can reach > > > out to the PyCon Chair - Van Lindberg (van at python.org) > > > > > > Jesse Noller > > > jnoller at python.org > > > Program Committee Chair > > > > > > ------- End of Forwarded Message > > > > _______________________________________________ > > pypy-dev at codespeak.net > > http://codespeak.net/mailman/listinfo/pypy-dev > > > > > > -- > http://www.ironpythoninaction.com/ > >> _______________________________________________ >> pypy-dev at codespeak.net >> http://codespeak.net/mailman/listinfo/pypy-dev > > -- > Metaprogramming, Python, Testing: http://tetamap.wordpress.com > Python, PyPy, pytest contracting: http://merlinux.eu > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From fijall at gmail.com Fri Sep 4 15:35:32 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Fri, 4 Sep 2009 07:35:32 -0600 Subject: [pypy-dev] Jesse Noller tried to invite us to something and was rudely rejected... In-Reply-To: <20090904132649.GS15455@trillke.net> References: <200909041116.n84BGQq7023127@theraft.openend.se> <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> <20090904132649.GS15455@trillke.net> Message-ID: <693bc9ab0909040635l5b569f0ex471deedce0b0f0bf@mail.gmail.com> Hey. I'm going to pycon so I suppose I'll just write him. On Fri, Sep 4, 2009 at 7:26 AM, holger krekel wrote: > On Fri, Sep 04, 2009 at 13:41 +0100, Michael Foord wrote: >> Most mailing lists I'm on these days ban posts from non-members, otherwise >> they get overwhelmed with spam unfortunately. :-( > > yip, that's the case here as well. > > holger > >> 2009/9/4 Laura Creighton >> >> > >> > Are we rejecting messages from non-members of the list automatically >> > these days? And if so, is this really necessary? And shouldn't we >> > change the reject message to indicate that? >> > >> > Or is jnoller at gmail.com on some special 'banned from posting' list, >> > and if so, why? >> > >> > Laura >> > >> > ------- Forwarded Message >> > >> > Return-Path: jnoller at gmail.com >> > Delivery-Date: Fri Sep 4 12:20:35 2009 >> > From: Jesse Noller >> > To: Laura Creighton >> > Content-Type: multipart/alternative; >> > >> > Hmm >> > >> > >> > Begin forwarded message: >> > >> > > From: pypy-dev-owner at codespeak.net >> > > Date: September 3, 2009 10:13:08 PM EDT >> > > To: jnoller at python.org >> > > Subject: PyCon 2010: Plenary talk invite >> > > >> > >> > > You are not allowed to post to this mailing list, and your message has >> > > been automatically rejected. If you think that your messages are >> > > being rejected in error, contact the mailing list owner at >> > > pypy-dev-owner at codespeak.net. >> > > >> > > Hello there PyPy! >> > > >> > >
I am reaching out to you today as the PyCon 2010 Program Committee >> > > Chair, and on the behalf of the committee as a whole. Part of the >> > > conference is the keynote/plenary talks - these talks range in time >> > > length and are presented to the conference as a whole. >> > > >> > > We're planning out PyCon 2010 - and in discussions, we felt that an >> > > excellent addition to PyCon would be a series of plenary talks (15 >> > > minutes in length) on showcasing the state of the various >> > > implementations of Python. I'd like to extend an invitation to one >> > > of you >> > > to act as the "lead" for PyPy and do one of these for us. >> > > >> > > Python is so much more than one implementation - and I know that >> > > I, and the community as a whole would love to hear anything you had to >> > > offer about your implementation. You can do something as simple as a >> > > state of the union talk, things you would like to see from >> > > python-core, or even encourage people to help contribute to your >> > > project. Anything directly related to your implementation is fair >> > > game. >> > > >> > > If you are interested in presenting, please get back to me as soon >> > > as possible indicating your acceptance. If you do not want to do this, >> > > and possibly have someone else in mind to present for you - by all >> > > means please feel free to send me their contact information. >> > > >> > > The PyCon conference days will be February 19-21, 2010 in Atlanta, >> > > Georgia, preceded by the tutorial days (February 17-18), and followed >> > > by four days of development sprints (February 22-25). >> > > >> > > I look forward to hearing from you. If you have any questions, >> > > please feel free to contact me (jnoller at python.org), or you can reach >> > > out to the PyCon Chair - Van Lindberg (van at python.org) >> > > >> > > Jesse Noller >> > > jnoller at python.org >> > > Program Committee Chair >> > >> > >> > ------- End of Forwarded Message >> > >> > _______________________________________________ >> > pypy-dev at codespeak.net >> > http://codespeak.net/mailman/listinfo/pypy-dev >> > >> >> >> >> -- >> http://www.ironpythoninaction.com/ > >> _______________________________________________ >> pypy-dev at codespeak.net >> http://codespeak.net/mailman/listinfo/pypy-dev > > -- > Metaprogramming, Python, Testing: http://tetamap.wordpress.com > Python, PyPy, pytest contracting: http://merlinux.eu > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From lac at openend.se Fri Sep 4 15:37:36 2009 From: lac at openend.se (Laura Creighton) Date: Fri, 04 Sep 2009 15:37:36 +0200 Subject: [pypy-dev] Jesse Noller tried to invite us to something and was rudely rejected... In-Reply-To: Message from Michael Foord of "Fri, 04 Sep 2009 13:41:57 BST." <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> References: <200909041116.n84BGQq7023127@theraft.openend.se> <6f4025010909040541w4f565319v849d920fcd553aa1@mail.gmail.com> Message-ID: <200909041337.n84Dbasv032242@theraft.openend.se> In a message of Fri, 04 Sep 2009 13:41:57 BST, Michael Foord writes: >Most mailing lists I'm on these days ban posts from non-members, otherwise >they get overwhelmed with spam unfortunately.
:-( > >Michael python.org mailing lists are pretty good at catching the spam before they show up on your lists which is why I can let most of the lists I run just let email from non-members get moderated in or out. Maybe that software that python.org is running needs to be packaged up to make it easier for other mailman lists to install and use it? Laura From pedronis at openend.se Mon Sep 7 10:45:12 2009 From: pedronis at openend.se (Samuele Pedroni) Date: Mon, 07 Sep 2009 10:45:12 +0200 Subject: [pypy-dev] [pypy-svn] r67547 - pypy/trunk/pypy/module/__builtin__ In-Reply-To: <20090906202819.84921168014@codespeak.net> References: <20090906202819.84921168014@codespeak.net> Message-ID: <4AA4C818.5020305@openend.se> this broke the pickling of enumerate: http://codespeak.net:8099/summary/longrepr?testname=AppTestInterpObjectPickling().test_pickle_enum&builder=own-linux-x86-32&build=541&mod=pypy.interpreter.test.test_zzpickle_and_slow I suppose reversed should to be pickable too. benjamin at codespeak.net wrote: > Author: benjamin > Date: Sun Sep 6 22:28:16 2009 > New Revision: 67547 > > Modified: > pypy/trunk/pypy/module/__builtin__/__init__.py > pypy/trunk/pypy/module/__builtin__/app_functional.py > pypy/trunk/pypy/module/__builtin__/functional.py > Log: > reimplement min, max, reduce, sum, filter, map, zip, enumerate, and reversed on > the interp level for speed > > > Modified: pypy/trunk/pypy/module/__builtin__/__init__.py > ============================================================================== > --- pypy/trunk/pypy/module/__builtin__/__init__.py (original) > +++ pypy/trunk/pypy/module/__builtin__/__init__.py Sun Sep 6 22:28:16 2009 > @@ -27,23 +27,11 @@ > 'raw_input' : 'app_io.raw_input', > 'input' : 'app_io.input', > > - 'sum' : 'app_functional.sum', > 'apply' : 'app_functional.apply', > - 'map' : 'app_functional.map', > - 'filter' : 'app_functional.filter', > - 'zip' : 'app_functional.zip', > - 'reduce' : 'app_functional.reduce', > #'range' : 'app_functional.range', > # redirected to functional.py, applevel version > # is still needed and should stay where it is. > - 'min' : 'app_functional.min', > - 'max' : 'app_functional.max', > - 'enumerate' : 'app_functional.enumerate', > 'sorted' : 'app_functional.sorted', > - 'reversed' : 'app_functional.reversed', > - '_install_pickle_support_for_reversed_iterator': > - 'app_functional._install_pickle_support_for_reversed_iterator', > - > 'globals' : 'app_inspect.globals', > 'locals' : 'app_inspect.locals', > 'vars' : 'app_inspect.vars', > @@ -106,8 +94,17 @@ > > 'range' : 'functional.range_int', > 'xrange' : 'functional.W_XRange', > + 'enumerate' : 'functional.W_Enumerate', > 'all' : 'functional.all', > 'any' : 'functional.any', > + 'min' : 'functional.min', > + 'max' : 'functional.max', > + 'sum' : 'functional.sum', > + 'map' : 'functional.map', > + 'zip' : 'functional.zip', > + 'reduce' : 'functional.reduce', > + 'reversed' : 'functional.reversed', > + 'filter' : 'functional.filter', > 'super' : 'descriptor.W_Super', > 'staticmethod' : 'descriptor.StaticMethod', > 'classmethod' : 'descriptor.ClassMethod', > > Modified: pypy/trunk/pypy/module/__builtin__/app_functional.py > ============================================================================== > --- pypy/trunk/pypy/module/__builtin__/app_functional.py (original) > +++ pypy/trunk/pypy/module/__builtin__/app_functional.py Sun Sep 6 22:28:16 2009 > @@ -3,151 +3,12 @@ > functional programming. 
> """ > > - > -def sum(sequence, total=0): > - """sum(sequence, start=0) -> value > - > -Returns the sum of a sequence of numbers (NOT strings) plus the value > -of parameter 'start'. When the sequence is empty, returns start.""" > - # must forbid "summing" strings, per specs of built-in 'sum' > - if isinstance(total, str): raise TypeError > - for item in sequence: > - total = total + item > - return total > - > # ____________________________________________________________ > > def apply(function, args=(), kwds={}): > """call a function (or other callable object) and return its result""" > return function(*args, **kwds) > > -def map(function, *collections): > - """does 3 separate things, hence this enormous docstring. > - 1. if function is None, return a list of tuples, each with one > - item from each collection. If the collections have different > - lengths, shorter ones are padded with None. > - > - 2. if function is not None, and there is only one collection, > - apply function to every item in the collection and return a > - list of the results. > - > - 3. if function is not None, and there are several collections, > - repeatedly call the function with one argument from each > - collection. If the collections have different lengths, > - shorter ones are padded with None > - """ > - > - if len(collections) == 0: > - raise TypeError, "map() requires at least one sequence" > - > - if len(collections) == 1: > - #it's the most common case, so make it faster > - if function is None: > - return list(collections[0]) > - return [function(x) for x in collections[0]] > - > - iterators = [ iter(collection) for collection in collections ] > - res = [] > - while 1: > - cont = False #is any collection not empty? > - args = [] > - for iterator in iterators: > - try: > - elem = iterator.next() > - cont = True > - except StopIteration: > - elem = None > - args.append(elem) > - if cont: > - if function is None: > - res.append(tuple(args)) > - else: > - res.append(function(*args)) > - else: > - return res > - > -def filterstring(function, collection, str_type): > - if function is None and type(collection) is str_type: > - return collection > - res = [] > - for i in xrange(len(collection)): > - c = collection[i] > - if function is None or function(c): > - if not isinstance(c, str_type): > - raise TypeError("can't filter %s to %s: __getitem__ returned different type", str_type.__name__, str_type.__name__) > - res.append(c) > - return str_type().join(res) > - > -def filtertuple(function, collection): > - if function is None: > - function = bool > - res = [] > - for i in xrange(len(collection)): > - c = collection[i] > - if function(c): > - res.append(c) > - return tuple(res) > - > -def filter(function, collection): > - """construct a list of those elements of collection for which function > - is True. If function is None, then return the items in the sequence > - which are True.""" > - if isinstance(collection, str): > - return filterstring(function, collection, str) > - elif isinstance(collection, unicode): > - return filterstring(function, collection, unicode) > - elif isinstance(collection, tuple): > - return filtertuple(function, collection) > - > - if function is None: > - return [item for item in collection if item] > - else: > - return [item for item in collection if function(item)] > - > -def zip(*collections): > - """return a list of tuples, where the nth tuple contains every > - nth item of each collection. 
If the collections have different > - lengths, zip returns a list as long as the shortest collection, > - ignoring the trailing items in the other collections.""" > - > - if len(collections) == 0: > - import sys > - if sys.version_info < (2,4): > - raise TypeError("zip() requires at least one sequence") > - return [] > - res = [] > - iterators = [ iter(collection) for collection in collections ] > - while 1: > - try: > - elems = [] > - for iterator in iterators: > - elems.append(iterator.next()) > - res.append(tuple(elems)) > - except StopIteration: > - return res > - > -def reduce(function, seq, *initialt): > - """ Apply function of two arguments cumulatively to the items of > - sequence, from left to right, so as to reduce the sequence to a > - single value. Optionally begin with an initial value.""" > - > - seqiter = iter(seq) > - if initialt: > - initial, = initialt > - else: > - try: > - initial = seqiter.next() > - except StopIteration: > - raise TypeError, "reduce() of empty sequence with no initial value" > - while 1: > - try: > - arg = seqiter.next() > - except StopIteration: > - break > - initial = function(initial, arg) > - > - return initial > - > # ____________________________________________________________ > > """ > @@ -206,135 +67,10 @@ > > # ____________________________________________________________ > > - > -def _identity(arg): > - return arg > - > - > -def min(*arr, **kwargs): > - """return the smallest number in a list, > - or its smallest argument if more than one is given.""" > - from operator import gt > - > - return min_max(gt, "min", *arr, **kwargs) > - > -def min_max(comp, funcname, *arr, **kwargs): > - key = kwargs.pop("key", _identity) > - if len(kwargs): > - raise TypeError, '%s() got an unexpected keyword argument' % funcname > - > - if not arr: > - raise TypeError, '%s() takes at least one argument' % funcname > - > - if len(arr) == 1: > - arr = arr[0] > - > - iterator = iter(arr) > - try: > - min_max_val = iterator.next() > - except StopIteration: > - raise ValueError, '%s() arg is an empty sequence' % funcname > - > - keyed_min_max_val = key(min_max_val) > - > - for i in iterator: > - keyed = key(i) > - if comp(keyed_min_max_val, keyed): > - min_max_val = i > - keyed_min_max_val = keyed > - return min_max_val > - > -def max(*arr, **kwargs): > - """return the largest number in a list, > - or its largest argument if more than one is given.""" > - from operator import lt > - > - return min_max(lt, "max", *arr, **kwargs) > - > -class enumerate(object): > - """enumerate(iterable) -> iterator for (index, value) of iterable. > - > -Return an enumerate object. iterable must be an other object that supports > -iteration. The enumerate object yields pairs containing a count (from > -zero) and a value yielded by the iterable argument. 
enumerate is useful > -for obtaining an indexed list: (0, seq[0]), (1, seq[1]), (2, seq[2]), ...""" > - > - def __init__(self, collection): > - self._iter = iter(collection) > - self._index = 0 > - > - def next(self): > - try: > - next = self._iter.next > - except AttributeError: > - # CPython raises a TypeError when next() is not defined > - raise TypeError('%s object has no next() method' % > - (type(self._iter).__name__,)) > - result = self._index, next() > - self._index += 1 > - return result > - > - def __iter__(self): > - return self > - > - > -# ____________________________________________________________ > - > def sorted(lst, cmp=None, key=None, reverse=None): > "sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list" > sorted_lst = list(lst) > sorted_lst.sort(cmp, key, reverse) > return sorted_lst > > -def reversed(sequence): > - "reversed(sequence) -> reverse iterator over values of the sequence" > - if hasattr(sequence, '__reversed__'): > - return sequence.__reversed__() > - if not hasattr(sequence, '__getitem__'): > - raise TypeError("argument to reversed() must be a sequence") > - return reversed_iterator(sequence) > - > - > -class reversed_iterator(object): > - > - def __init__(self, seq): > - self.seq = seq > - self.remaining = len(seq) > - > - def __iter__(self): > - return self > - > - def next(self): > - if self.remaining > len(self.seq): > - self.remaining = 0 > - i = self.remaining > - if i > 0: > - i -= 1 > - item = self.seq[i] > - self.remaining = i > - return item > - raise StopIteration > - > -# XXX __length_hint__() > -## def __len__(self): > -## if self.remaining > len(self.seq): > -## self.remaining = 0 > -## return self.remaining > - > - def __reduce__(self): > - tup = (self.seq, self.remaining) > - return (make_reversed_iterator, tup) > - > -def make_reversed_iterator(seq, remaining): > - ri = reversed_iterator.__new__(reversed_iterator) > - ri.seq = seq > - #or "ri = reversed_iterator(seq)" but that executes len(seq) > - ri.remaining = remaining > - return ri > - > -def _install_pickle_support_for_reversed_iterator(): > - import _pickle_support > - make_reversed_iterator.__module__ = '_pickle_support' > - _pickle_support.make_reversed_iterator = make_reversed_iterator > - > > > Modified: pypy/trunk/pypy/module/__builtin__/functional.py > ============================================================================== > --- pypy/trunk/pypy/module/__builtin__/functional.py (original) > +++ pypy/trunk/pypy/module/__builtin__/functional.py Sun Sep 6 22:28:16 2009 > @@ -8,7 +8,9 @@ > from pypy.interpreter.gateway import interp2app > from pypy.interpreter.typedef import TypeDef > from pypy.interpreter.baseobjspace import Wrappable > +from pypy.interpreter.argument import Arguments > from pypy.rlib.rarithmetic import r_uint, intmask > +from pypy.rlib.objectmodel import specialize > from pypy.module.__builtin__.app_functional import range as app_range > from inspect import getsource, getfile > > @@ -100,7 +102,244 @@ > return W_ListMultiObject(space, impl) > > > + at specialize.arg(2) > +def min_max(space, arguments, implementation_of): > + if implementation_of == "max": > + compare = space.gt > + else: > + compare = space.lt > + args, kwargs = arguments.unpack() > + if len(args) > 1: > + w_sequence = space.newtuple(args) > + elif len(args): > + w_sequence = args[0] > + else: > + msg = "%s() expects at least one argument" % (implementation_of,) > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + try: > + w_key = kwargs["key"] > + except 
KeyError: > + w_key = None > + else: > + del kwargs["key"] > + if kwargs: > + msg = "%s() got unexpected keyword argument" % (implementation_of,) > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + w_iter = space.iter(w_sequence) > + w_max_item = None > + w_max_val = None > + while True: > + try: > + w_item = space.next(w_iter) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + break > + if w_key is not None: > + w_compare_with = space.call_function(w_key, w_item) > + else: > + w_compare_with = w_item > + if w_max_item is None or \ > + space.is_true(compare(w_compare_with, w_max_val)): > + w_max_item = w_item > + w_max_val = w_compare_with > + if w_max_item is None: > + msg = "arg is an empty sequence" > + raise OperationError(space.w_ValueError, space.wrap(msg)) > + return w_max_item > > +def max(space, __args__): > + """Return the largest item in a sequence. > + > + If more than one argument is passed, return the maximum of them. > + """ > + return min_max(space, __args__, "max") > +max.unwrap_spec = [ObjSpace, Arguments] > + > +def min(space, __args__): > + """Return the smallest item in a sequence. > + > + If more than one argument is passed, return the minimum of them. > + """ > + return min_max(space, __args__, "min") > +min.unwrap_spec = [ObjSpace, Arguments] > + > +def map(space, w_func, collections_w): > + """does 3 separate things, hence this enormous docstring. > + 1. if function is None, return a list of tuples, each with one > + item from each collection. If the collections have different > + lengths, shorter ones are padded with None. > + > + 2. if function is not None, and there is only one collection, > + apply function to every item in the collection and return a > + list of the results. > + > + 3. if function is not None, and there are several collections, > + repeatedly call the function with one argument from each > + collection. 
If the collections have different lengths, > + shorter ones are padded with None > + """ > + if not collections_w: > + msg = "map() requires at least two arguments" > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + num_collections = len(collections_w) > + none_func = space.is_w(w_func, space.w_None) > + if none_func and num_collections == 1: > + return space.call_function(space.w_list, collections_w[0]) > + result_w = [] > + iterators_w = [space.iter(w_seq) for w_seq in collections_w] > + num_iterators = len(iterators_w) > + while True: > + cont = False > + args_w = [space.w_None] * num_iterators > + for i in range(len(iterators_w)): > + try: > + args_w[i] = space.next(iterators_w[i]) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + else: > + cont = True > + w_args = space.newtuple(args_w) > + if cont: > + if none_func: > + result_w.append(w_args) > + else: > + w_res = space.call(w_func, w_args) > + result_w.append(w_res) > + else: > + return space.newlist(result_w) > +map.unwrap_spec = [ObjSpace, W_Root, "args_w"] > + > +def sum(space, w_sequence, w_start=None): > + if space.is_w(w_start, space.w_None): > + w_start = space.wrap(0) > + elif space.is_true(space.isinstance(w_start, space.w_basestring)): > + msg = "sum() can't sum strings" > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + w_iter = space.iter(w_sequence) > + w_last = w_start > + while True: > + try: > + w_next = space.next(w_iter) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + break > + w_last = space.add(w_last, w_next) > + return w_last > +sum.unwrap_spec = [ObjSpace, W_Root, W_Root] > + > +def zip(space, sequences_w): > + """Return a list of tuples, where the nth tuple contains every nth item of > + each collection. > + > + If the collections have different lengths, zip returns a list as long as the > + shortest collection, ignoring the trailing items in the other collections. > + """ > + if not sequences_w: > + return space.newlist([]) > + result_w = [] > + iterators_w = [space.iter(w_seq) for w_seq in sequences_w] > + while True: > + try: > + items_w = [space.next(w_it) for w_it in iterators_w] > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + return space.newlist(result_w) > + result_w.append(space.newtuple(items_w)) > +zip.unwrap_spec = [ObjSpace, "args_w"] > + > +def reduce(space, w_func, w_sequence, rest_w): > + """ Apply function of two arguments cumulatively to the items of sequence, > + from left to right, so as to reduce the sequence to a single value. > + Optionally begin with an initial value. 
> + """ > + w_iter = space.iter(w_sequence) > + if rest_w: > + if len(rest_w) > 1: > + msg = "reduce() takes only 3 possible arguments" > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + w_initial, = rest_w > + else: > + try: > + w_initial = space.next(w_iter) > + except OperationError, e: > + if e.match(space, space.w_StopIteration): > + msg = "reduce() of empty sequence with no initial value" > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + raise > + w_result = w_initial > + while True: > + try: > + w_next = space.next(w_iter) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + break > + w_result = space.call_function(w_func, w_result, w_next) > + return w_result > +reduce.unwrap_spec = [ObjSpace, W_Root, W_Root, "args_w"] > + > +def filter(space, w_func, w_seq): > + """construct a list of those elements of collection for which function > + is True. If function is None, then return the items in the sequence > + which are True. > + """ > + if space.is_true(space.isinstance(w_seq, space.w_str)): > + return _filter_string(space, w_func, w_seq, space.w_str) > + if space.is_true(space.isinstance(w_seq, space.w_unicode)): > + return _filter_string(space, w_func, w_seq, space.w_unicode) > + if space.is_true(space.isinstance(w_seq, space.w_tuple)): > + return _filter_tuple(space, w_func, w_seq) > + w_iter = space.iter(w_seq) > + result_w = [] > + none_func = space.is_w(w_func, space.w_None) > + while True: > + try: > + w_next = space.next(w_iter) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + break > + if none_func: > + w_keep = w_next > + else: > + w_keep = space.call_function(w_func, w_next) > + if space.is_true(w_keep): > + result_w.append(w_next) > + return space.newlist(result_w) > + > +def _filter_tuple(space, w_func, w_tuple): > + none_func = space.is_w(w_func, space.w_None) > + length = space.int_w(space.len(w_tuple)) > + result_w = [] > + for i in range(length): > + w_item = space.getitem(w_tuple, space.wrap(i)) > + if none_func: > + w_keep = w_item > + else: > + w_keep = space.call_function(w_func, w_item) > + if space.is_true(w_keep): > + result_w.append(w_item) > + return space.newtuple(result_w) > + > +def _filter_string(space, w_func, w_string, w_str_type): > + none_func = space.is_w(w_func, space.w_None) > + if none_func and space.is_w(space.type(w_string), w_str_type): > + return w_string > + length = space.int_w(space.len(w_string)) > + result_w = [] > + for i in range(length): > + w_item = space.getitem(w_string, space.wrap(i)) > + if none_func or space.is_true(space.call_function(w_func, w_item)): > + if not space.is_true(space.isinstance(w_item, w_str_type)): > + msg = "__getitem__ returned a non-string type" > + raise OperationError(space.w_TypeError, space.wrap(msg)) > + result_w.append(w_item) > + w_empty = space.call_function(w_str_type) > + return space.call_method(w_empty, "join", space.newlist(result_w)) > > def all(space, w_S): > """all(iterable) -> bool > @@ -138,6 +377,77 @@ > any.unwrap_spec = [ObjSpace, W_Root] > > > +class W_Enumerate(Wrappable): > + > + def __init__(self, w_iter, w_start): > + self.w_iter = w_iter > + self.w_index = w_start > + > + def descr___new__(space, w_subtype, w_iterable): > + self = space.allocate_instance(W_Enumerate, w_subtype) > + self.__init__(space.iter(w_iterable), space.wrap(0)) > + return space.wrap(self) > + > + def descr___iter__(self, space): > + return space.wrap(self) > + descr___iter__.unwrap_spec = 
["self", ObjSpace] > + > + def descr_next(self, space): > + w_item = space.next(self.w_iter) > + w_index = self.w_index > + self.w_index = space.add(w_index, space.wrap(1)) > + return space.newtuple([w_index, w_item]) > + descr_next.unwrap_spec = ["self", ObjSpace] > + > + > +W_Enumerate.typedef = TypeDef("enumerate", > + __new__=interp2app(W_Enumerate.descr___new__.im_func), > + __iter__=interp2app(W_Enumerate.descr___iter__), > + next=interp2app(W_Enumerate.descr_next), > +) > + > + > +def reversed(space, w_sequence): > + """Return a iterator that yields items of sequence in reverse.""" > + w_reversed_descr = space.lookup(w_sequence, "__reversed__") > + if w_reversed_descr is None: > + return space.wrap(W_ReversedIterator(space, w_sequence)) > + return space.get_and_call_function(w_reversed_descr, w_sequence) > +reversed.unwrap_spec = [ObjSpace, W_Root] > + > +class W_ReversedIterator(Wrappable): > + > + def __init__(self, space, w_sequence): > + self.remaining = space.int_w(space.len(w_sequence)) - 1 > + self.w_sequence = w_sequence > + > + def descr___iter__(self, space): > + return space.wrap(self) > + descr___iter__.unwrap_spec = ["self", ObjSpace] > + > + def descr_next(self, space): > + if self.remaining >= 0: > + w_index = space.wrap(self.remaining) > + try: > + w_item = space.getitem(self.w_sequence, w_index) > + except OperationError, e: > + if not e.match(space, space.w_StopIteration): > + raise > + else: > + self.remaining -= 1 > + return w_item > + > + # Done > + self.remaining = -1 > + raise OperationError(space.w_StopIteration, space.w_None) > + descr_next.unwrap_spec = ["self", ObjSpace] > + > +W_ReversedIterator.typedef = TypeDef("reversed", > + __iter__=interp2app(W_ReversedIterator.descr___iter__), > + next=interp2app(W_ReversedIterator.descr_next), > +) > + > + > class W_XRange(Wrappable): > def __init__(self, space, start, len, step): > self.space = space > _______________________________________________ > pypy-svn mailing list > pypy-svn at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-svn > From florian.schulze at gmx.net Tue Sep 15 13:39:24 2009 From: florian.schulze at gmx.net (Florian Schulze) Date: Tue, 15 Sep 2009 13:39:24 +0200 Subject: [pypy-dev] ANN: psyco V2 References: <4A5FE6B0.8070500@stackless.com> Message-ID: On Fri, 17 Jul 2009 04:49:20 +0200, Christian Tismer wrote: > Announcing Psyco V2 source release > ---------------------------------- Hi! I just now saw this announcement. My biggest question is, what would it take to support 64 bit? I just recently switched to Snow Leopard and most of the deployments I manage are on 64 bit now. Regards, Florian Schulze From florian.schulze at gmx.net Tue Sep 15 13:43:12 2009 From: florian.schulze at gmx.net (Florian Schulze) Date: Tue, 15 Sep 2009 13:43:12 +0200 Subject: [pypy-dev] Grand Central like multiprocessing? Message-ID: Hi! I wondered whether it would make sense to have multiprocessing implemented the way Grand Central from Apple is done. With the 'with' statement this could be very nicely supported in Python and would make it very easy to use. If PyPy supported something like this and could take advantage of the OS level on OS X 10.6 and maybe future implementations of similar technology on other OSes then Python could get another boost, for example in the scientific and super computing communities. Thoughts? 
Regards, Florian Schulze From thefridgeowl at gmail.com Tue Sep 15 16:33:21 2009 From: thefridgeowl at gmail.com (Henry Mason) Date: Tue, 15 Sep 2009 07:33:21 -0700 Subject: [pypy-dev] Grand Central like multiprocessing? In-Reply-To: References: Message-ID: Hm, what part of GCD would you use? You can effectively use libdispatch inside of Python already by wrapping the dispatch_* APIs in ctypes and then using ctypes' callback function mechanism to pass Python functions onto dispatch queues. This would get even better if ctypes had first class support for Blocks. (hey that's a good idea...) The problem is that libdispatch doesn't get you around the global interpreter lock, so your Python code would still only run on one thread at a time. But yeah, once the GIL is gone, I imagine libdispatch (and/or some kind of nice pythonic wrapper for it) would be pretty cool. -Henry On Sep 15, 2009, at 4:43 AM, Florian Schulze wrote: > Hi! > > I wondered whether it would make sense to have multiprocessing > implemented > the way Grand Central from Apple is done. With the 'with' statement > this > could be very nicely supported in Python and would make it very easy > to > use. If PyPy supported something like this and could take advantage > of the > OS level on OS X 10.6 and maybe future implementations of similar > technology on other OSes then Python could get another boost, for > example > in the scientific and super computing communities. > > Thoughts? > > Regards, > Florian Schulze > > > > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev From scott+pypy-dev at scottdial.com Tue Sep 15 19:50:48 2009 From: scott+pypy-dev at scottdial.com (Scott Dial) Date: Tue, 15 Sep 2009 13:50:48 -0400 Subject: [pypy-dev] ANN: psyco V2 In-Reply-To: References: <4A5FE6B0.8070500@stackless.com> Message-ID: <4AAFD3F8.9040509@scottdial.com> Florian Schulze wrote: > On Fri, 17 Jul 2009 04:49:20 +0200, Christian Tismer > wrote: >> Announcing Psyco V2 source release >> ---------------------------------- > > I just now saw this announcement. My biggest question is, what would it > take to support 64 bit? I just recently switched to Snow Leopard and most > of the deployments I manage are on 64 bit now. I looked at this once before because I had a compute-heavy python project on an x86_64 box. I seem to recall a lot of the code made assumptions about the bit-width of C types and needed to have the data structures updated to use things like Py_ssize_t and the like to talk to the python interpreter. A lot of the assembly code should be the same except the stackframe offsets have changed. However, a good implementation would take advantage of the extra registers, which would require some tweaking. I quit pursuing it because it was not something I was motivated to do and I can only imagine the level of frustration I would've developed debugging that sort of code. -Scott -- Scott Dial scott at scottdial.com scodial at cs.indiana.edu From iko at openend.se Mon Sep 21 17:27:04 2009 From: iko at openend.se (Anders Hammarquist) Date: Mon, 21 Sep 2009 17:27:04 +0200 Subject: [pypy-dev] benchmarking input Message-ID: <200909211527.n8LFR4xQ028739@theraft.openend.se> Hi all, I need some input for the benchmarking infrastructure. I'm nearly at the point where I need to have some place to run it before continuing (i.e. I need to try and use it, not just speculate). 
Anyway, what I was thinking about, and need input on, is how to get at the interpreters to run the benchmark. When we were talking just benchmarks, and not profiling, my thought was to just use whatever python the machine has, and fetch the pypy from the last buildbot run, but for profiling that will not work (and anyway, running the profiling on the standard python is quite pointless). So benchmarks will obviously have to specify what interpreter(s) they should be run by somehow. The bigger question is how to get those interpreters. Should running the benchmarks also trigger building one (or more) pypy interpreters according to specs in the benchmarking framework? (but then if you only want it to run one benchmark, you may have to wait for all the interpreters to build) Perhaps each benchmark should build its own interpreter (though this seems slow, given that most benchmarks can probably run on an identically built interpreter). Or maybe the installed infrastructure should only care about history, and if you want to run a single benchmark, you do that on your own. Thoughts please! /Anders From anto.cuni at gmail.com Mon Sep 21 20:01:44 2009 From: anto.cuni at gmail.com (Antonio Cuni) Date: Mon, 21 Sep 2009 20:01:44 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <200909211527.n8LFR4xQ028739@theraft.openend.se> References: <200909211527.n8LFR4xQ028739@theraft.openend.se> Message-ID: <4AB7BF88.7080400@gmail.com> Anders Hammarquist wrote: > Hi all, Hi Iko, hi all [cut] > So benchmarks > will obviously have to specify what interpreter(s) they should be run > by somehow. I think this is a vital requirement. I can imagine various scenarios where you want to specify which interpreters to benchmark, e.g.: 1) benchmarking pypy-cli vs IronPython or pypy-jvm vs Jython 2) benchmarking pypy-cs at different svn revisions 3) benchmarking pypy-c-trunk vs pypy-c-some-branch (maybe with the possibility of specifying pypy-c-trunk-at-the-revision-where-the-branch-was-created, to avoid noise) 4) benchmarking pypy-cs with different build options 5) benchmarking with profiling enabled (I'd say that profiling should be off by default) > The bigger question is how to get those interpreters. Should running > the benchmarks also trigger building one (or more) pypy interpreters > according to specs in the benchmarking framework? (but then if you > only want it to run one benchmark, you may have to wait for all the > interpreters to build) Perhaps each benchmark should build its own > interpreter (though this seems slow, given that most benchmarks > can probably run on an identically built interpreter). > > Or maybe the installed infrastructure should only care about history, > and if you want to run a single benchmark, you do that on your own. Conceptually, I would say that you need to rebuild the required pypys every time you run the benchmarks. Concretely, we can think of putting them into a cache, so that if you need a pypy-c that for some reason has already been built, you just reuse it. Moreover, it could be nice if you could select the pypy to benchmark from a list of already built pypys, if you want to save time. Also, we may need to think how to deal with excessive loading: if every one of us tries to run his own set of benchmarks, the benchmarking machine could become too overloaded to be useful in any sense.
ciao, Anto From tobami at googlemail.com Mon Sep 21 22:35:18 2009 From: tobami at googlemail.com (Miquel Torres) Date: Mon, 21 Sep 2009 22:35:18 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) Message-ID: Hi, this is my first post on the pypy-dev mailing list. I've commented on the pypy blog, and was encouraged by fijal to continue here. My daily job involves some web coding (HTML, javascript, Django...), python coding (frameworks, scipts), usability analysis, and opensource project management among other tasks, so I have some ideas on how to improve pypy's website, as well as the project's visibility. I find the pypy project extremely interesting (and important!), and I think that once a version of the JIT gets into a release the project will get a lot more attention. There are two things I want to discuss: One is improving pypy's main website. While the current site has served its purpose, it is mostly a pypy developers site. Better structure and navigation would be desirable when pypy becomes popular among mainstream python developers. So there are two options: to keep the current http://codespeak.net/pypy/dist/pypy/doc/index.html site for developers and develop a new www.pypy.org site, or improve the current one. The tasks to perform would be: - Agree on a new website or keeping and improving the current one - Choose a CMS (or hand-code or whatever) to craft the website - Define a navigation menu with key areas (about, download, news, roadmap, benchmarks, developement...) - Visual design - Code ;.) I can help with some (or all) of these tasks. Another matter are benchmarks. Because it is the project's most visible "feature" or "result", it would be great to publish a set of benchmarks so that python users can keep track of performance across different versions (cpython 2.6 vs pypy1.1, Jython, etc...). That way they can keep track of performance improvements as well as decide when it becomes attractive for them to make the switch from cpython. It would be the best advertisement for the project. The best case would be if you internally perform performance test to prevent performance regression on new releases, and that same data could be also be automatically published on the web, in the dev pages during development, and .in the "public" pages for final releases. So the tasks here would be: - Define a set of standard benchmarks that will serve as performance tests for every new release (including alphas and betas) - Create a script that gathers all the data for developers to analyse and spot performance regressions and bugs AND outputs the data in such a way that it can be automatically published on the website (so no extra maintenance workload) - Code the web page that beautifully shows the data in a suitable format (tables, graphs) I have recently done some work on dynamic javascript (or python) plotting, so I can take care of the last part relatively easily. I could also help with the second task. So I leave it there for you to discuss. What do you think of it all? Cheers, Miquel -------------- next part -------------- An HTML attachment was scrubbed... URL: From holger at merlinux.eu Tue Sep 22 09:28:34 2009 From: holger at merlinux.eu (holger krekel) Date: Tue, 22 Sep 2009 09:28:34 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: Message-ID: <20090922072834.GO15455@trillke.net> Hi Miquel, thanks for your mail and great offers! 
I setup much of the current website infrastructure and agree there is lots of room for improvements and that it gets about time. I can imagine you could start immediately with helping in the benchmarking visualization area. Do you by chance happen to be able to come to the prospective 6-13th November PyPy-Sprint in Duesseldorf? cheers, holger On Mon, Sep 21, 2009 at 22:35 +0200, Miquel Torres wrote: > Hi, this is my first post on the pypy-dev mailing list. I've commented on > the pypy blog, and was encouraged by fijal to continue here. > My daily job involves some web coding (HTML, javascript, Django...), python > coding (frameworks, scipts), usability analysis, and opensource project > management among other tasks, so I have some ideas on how to improve pypy's > website, as well as the project's visibility. I find the pypy project > extremely interesting (and important!), and I think that once a version of > the JIT gets into a release the project will get a lot more attention. > > There are two things I want to discuss: > > One is improving pypy's main website. While the current site has served its > purpose, it is mostly a pypy developers site. Better structure and > navigation would be desirable when pypy becomes popular among mainstream > python developers. So there are two options: to keep the current > http://codespeak.net/pypy/dist/pypy/doc/index.html site for developers and > develop a new www.pypy.org site, or improve the current one. > > The tasks to perform would be: > - Agree on a new website or keeping and improving the current one > - Choose a CMS (or hand-code or whatever) to craft the website > - Define a navigation menu with key areas (about, download, news, > roadmap, benchmarks, developement...) > - Visual design > - Code ;.) > > I can help with some (or all) of these tasks. > > Another matter are benchmarks. Because it is the project's most visible > "feature" or "result", it would be great to publish a set of benchmarks so > that python users can keep track of performance across different versions > (cpython 2.6 vs pypy1.1, Jython, etc...). That way they can keep track of > performance improvements as well as decide when it becomes attractive for > them to make the switch from cpython. It would be the best advertisement for > the project. The best case would be if you internally perform performance > test to prevent performance regression on new releases, and that same data > could be also be automatically published on the web, in the dev pages > during development, and .in the "public" pages for final releases. > > So the tasks here would be: > - Define a set of standard benchmarks that will serve as performance tests > for every new release (including alphas and betas) > - Create a script that gathers all the data for developers to analyse and > spot performance regressions and bugs AND outputs the data in such a way > that it can be automatically published on the website (so no extra > maintenance workload) > - Code the web page that beautifully shows the data in a suitable format > (tables, graphs) > > I have recently done some work on dynamic javascript (or python) plotting, > so I can take care of the last part relatively easily. I could also help > with the second task. > > So I leave it there for you to discuss. What do you think of it all? 
> > Cheers, > > Miquel > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev -- Metaprogramming, Python, Testing: http://tetamap.wordpress.com Python, PyPy, pytest contracting: http://merlinux.eu From tobami at googlemail.com Tue Sep 22 15:58:46 2009 From: tobami at googlemail.com (Miquel Torres) Date: Tue, 22 Sep 2009 15:58:46 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: <20090922072834.GO15455@trillke.net> References: <20090922072834.GO15455@trillke.net> Message-ID: Hi Holger, I'm glad if I can be of any help. > I can imagine you could start immediately with helping in the > benchmarking visualization area. Fine. The first step would be to agree on how the benchmark data will be saved (a text or xml format?. to the website's backend DB?) so that the test suite is designed that way from the beginning. Is there a pypy wiki?, how do you document the possibilities and final decisions on such matters?. We also need to agree on how the process to improve the website should look like (see, I'm a newcomer, I don't know how you go about things here :-) > Do you by chance happen to be able to come to > the prospective 6-13th November PyPy-Sprint in Duesseldorf? I was planning to travel to Germany over the next few months, but in November I don't know whether time nor funds will permit. I'll look into it, it would surely be awesome!. Cheers, Miquel 2009/9/22 holger krekel > Hi Miquel, > > thanks for your mail and great offers! I setup much of the current website > infrastructure and agree there is lots of room for improvements and that it > gets about time. I can imagine you could start immediately with helping in > the > benchmarking visualization area. Do you by chance happen to be able to > come to > the prospective 6-13th November PyPy-Sprint in Duesseldorf? > > cheers, > holger > > On Mon, Sep 21, 2009 at 22:35 +0200, Miquel Torres wrote: > > Hi, this is my first post on the pypy-dev mailing list. I've commented on > > the pypy blog, and was encouraged by fijal to continue here. > > My daily job involves some web coding (HTML, javascript, Django...), > python > > coding (frameworks, scipts), usability analysis, and opensource project > > management among other tasks, so I have some ideas on how to improve > pypy's > > website, as well as the project's visibility. I find the pypy project > > extremely interesting (and important!), and I think that once a version > of > > the JIT gets into a release the project will get a lot more attention. > > > > There are two things I want to discuss: > > > > One is improving pypy's main website. While the current site has served > its > > purpose, it is mostly a pypy developers site. Better structure and > > navigation would be desirable when pypy becomes popular among mainstream > > python developers. So there are two options: to keep the current > > http://codespeak.net/pypy/dist/pypy/doc/index.html site for developers > and > > develop a new www.pypy.org site, or improve the current one. > > > > The tasks to perform would be: > > - Agree on a new website or keeping and improving the current one > > - Choose a CMS (or hand-code or whatever) to craft the website > > - Define a navigation menu with key areas (about, download, news, > > roadmap, benchmarks, developement...) > > - Visual design > > - Code ;.) > > > > I can help with some (or all) of these tasks. > > > > Another matter are benchmarks. 
Because it is the project's most visible > > "feature" or "result", it would be great to publish a set of benchmarks > so > > that python users can keep track of performance across different versions > > (cpython 2.6 vs pypy1.1, Jython, etc...). That way they can keep track of > > performance improvements as well as decide when it becomes attractive for > > them to make the switch from cpython. It would be the best advertisement > for > > the project. The best case would be if you internally perform performance > > test to prevent performance regression on new releases, and that same > data > > could be also be automatically published on the web, in the dev pages > > during development, and .in the "public" pages for final releases. > > > > So the tasks here would be: > > - Define a set of standard benchmarks that will serve as performance > tests > > for every new release (including alphas and betas) > > - Create a script that gathers all the data for developers to analyse and > > spot performance regressions and bugs AND outputs the data in such a way > > that it can be automatically published on the website (so no extra > > maintenance workload) > > - Code the web page that beautifully shows the data in a suitable format > > (tables, graphs) > > > > I have recently done some work on dynamic javascript (or python) > plotting, > > so I can take care of the last part relatively easily. I could also help > > with the second task. > > > > So I leave it there for you to discuss. What do you think of it all? > > > > Cheers, > > > > Miquel > > > _______________________________________________ > > pypy-dev at codespeak.net > > http://codespeak.net/mailman/listinfo/pypy-dev > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From fijall at gmail.com Thu Sep 24 16:38:14 2009 From: fijall at gmail.com (fijall) Date: Thu, 24 Sep 2009 08:38:14 -0600 Subject: [pypy-dev] benchmarking input In-Reply-To: <200909211527.n8LFR4xQ028739@theraft.openend.se> References: <200909211527.n8LFR4xQ028739@theraft.openend.se> Message-ID: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> Hi Anders. I, personally, would start with a more modest goal than fully running infrastructure. I would like to be able to run it myself, provided I have downloaded and compiled all necessary interpreters. So say, I want to run benchmarks a, b, c using python, pypy-a, pypy-b and pypy-c. And I say something like: ./run_benchmarks --benchmarks="a b c" --interpreters="pypy-a pypy-b python pypy-c" And get some sort of results, to start with in a text form. Next step would be to have a backend that stores informations between runs, but I would really really like to go with incremental approach, where I have something to start with and later on improve and add features. PS. Sorry for late reply, was on holiday. Cheers, fijal On Mon, Sep 21, 2009 at 9:27 AM, Anders Hammarquist wrote: > Hi all, > > I need some input for the benchmarking infrastructure. I'm nearly > at the point where I need to have some place to run it before > continuing (i.e. I need to try and use it, not just speculate). > > Anyway, what I was thinking about, and need input on, is how to get > at the interpreters to run the benchmark. When we were talking just > benchmarks, and not profiling, my thought was to just use whatever > python the machine has, and fetch the pypy from the last buildbot > run, but for profiling that will not work (and anyway, running the > profiling on the standard python is quite pointless). 
So benchmarks > will obviously have to specify what interpreter(s) they should be run > by somehow. > > The bigger question is how to get those interpreters. Should running > the benchmarks also trigger building one (or more) pypy interpreters > according to specs in the benchmarking framework? (but then if you > only want it to run one benchmark, you may have to wait for all the > interpreters to build) Perhaps each benchmark should build its own > interpreter (though this seems slow, given that most benchmarks > can probably run on an identically built interpreter). > > Or maybe the installed infrastructure should only care about history, > and if you want to run a single benchmark, you do that on your own. > > Thoughts please! > > /Anders > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > > > > From fijall at gmail.com Thu Sep 24 16:45:41 2009 From: fijall at gmail.com (fijall) Date: Thu, 24 Sep 2009 08:45:41 -0600 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: Message-ID: <693bc9ab0909240745q36309152mc995169e4a59273c@mail.gmail.com> Hi. First of all, thanks a lot for your interest. In general, my ideas are very much in line with what you said. I'll try to answer specific questions one by one. > [snip] > current?http://codespeak.net/pypy/dist/pypy/doc/index.html?site for > developers and develop a new?www.pypy.org?site, or improve the current one. I would go for www.pypy.org website, dedicated for potential users and to keep current website under current address for potential developers. > The tasks to perform would be: > - Agree on a new website or keeping and improving the current one > - Choose a CMS (or hand-code or whatever)?to craft the website > - Define a navigation menu with key areas (about, download, news, > roadmap,?benchmarks,?developement...) > - Visual design > - Code ;.) I would very much like to discuss this part in details with you. If you can make it to pypy sprint, would be awesome, if not, we should definitely at least chat online. > I can help with some (or all) of?these?tasks. Great :-) > Another matter are benchmarks. Because it is the project's most visible > "feature" or "result", it would be great to publish a set of benchmarks so > that python users can keep track of performance across different versions > (cpython 2.6 vs?pypy1.1, Jython, etc...). That way they can keep track of > performance improvements as well as decide when it becomes?attractive?for > them to make the switch from cpython. It would be the best advertisement for > the project. The best case would be if you internally perform performance > test to prevent performance regression on new releases, and that same data > could be also be automatically published on the web, in the dev pages > during?development, and .in the "public" pages for final releases. > So the tasks here would be: > - Define a set of standard benchmarks that will serve as performance tests > for every new release (including alphas and betas) > - Create a script that gathers all the data for developers to?analyse?and > spot performance regressions and bugs AND outputs the data in such a way > that it can be automatically published on the website (so no extra > maintenance workload) > - Code the?web page?that beautifully shows the data in a suitable format > (tables, graphs) > I have recently done some work on dynamic javascript (or python) plotting, > so I can take care of the last part relatively easily. 
I could also help > with the second task. > So I leave it there for you to discuss. What do you think of it all? > Cheers, > Miquel > We have recently started some effort here: http://codespeak.net/svn/pypy/build/benchmark/ for the benchmarks and infrastructure. We also consider this effort as a very important one. As of yet, there is no web page automatically showing results, but let's do it bit by bit. I uploaded some potential benchmarks to directory there, so we have a starting point. Where would you start? Personally, I thinks it even makes sense to do some visual design of a web page as a starting point. Cheers, fijal PS. I was on holidays and now ill, hence long response time, sorry From cfbolz at gmx.de Thu Sep 24 18:28:06 2009 From: cfbolz at gmx.de (Carl Friedrich) Date: Thu, 24 Sep 2009 18:28:06 +0200 Subject: [pypy-dev] PyPy Sprint Announcement, Duesseldorf 6 Nov- 13 Nov Message-ID: <4ABB9E16.4020009@gmx.de> D?sseldorf PyPy sprint November 6 - November 13 2009 ===================================================== The next PyPy sprint will be held in the Computer Science department of Heinrich-Heine Universit?t D?sseldorf from the 6th to the 13th of November 2009. This is a fully public sprint, everyone is welcome to join us. Topics and goals ---------------- At the sprint we intend to work on the JIT generator in PyPy and on applying it to PyPy Python interpreter. The precise work that will be done is not fixed, as we don't know in which state the JIT will be in November. However, possible areas of work might include: - tweaking the interpreter/objspace to be more JIT-friendly, e.g. instance implementation code, call code - if there is interest starting non x86-32 JIT backends - trying out existing software to find features where the optimizations of the JIT could be improved - improving our benchmarking infrastructure We will give special priority to topics that "non-core" people find interesting (as long as they are somehow JIT-related). For an introduction of how our JIT-generation process works, please refer to our blog: http://morepypy.blogspot.com/2009/03/jit-bit-of-look-inside.html There is also a more dense academic paper about the subject: http://codespeak.net/svn/pypy/extradoc/talk/icooolps2009/bolz-tracing-jit-final.pdf Location -------- The sprint will take place in a seminar room of the computer science department. It is in the building 25.12 of the university campus. For travel instructions see http://stups.cs.uni-duesseldorf.de/anreise/esbahn.php Registration ------------ If you'd like to come, please subscribe to the `pypy-sprint mailing list`_ and drop a note about your interests and post any questions. More organisational information will be send to that list. We'll keep a list of `people`_ which we'll update (which you can do so yourself if you have codespeak commit rights). .. _`pypy-sprint mailing list`: http://codespeak.net/mailman/listinfo/pypy-sprint .. _`people`: http://codespeak.net/pypy/extradoc/sprintinfo/ddorf2009/people.txt From fijall at gmail.com Thu Sep 24 18:39:13 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Thu, 24 Sep 2009 10:39:13 -0600 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: Message-ID: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> Hi. First of all, thanks a lot for your interest. In general, my ideas are very much in line with what you said. I'll try to answer specific questions one by one. 
> [snip] > current?http://codespeak.net/pypy/dist/pypy/doc/index.html?site for > developers and develop a new?www.pypy.org?site, or improve the current one. I would go for www.pypy.org website, dedicated for potential users and to keep current website under current address for potential developers. > The tasks to perform would be: > - Agree on a new website or keeping and improving the current one > - Choose a CMS (or hand-code or whatever)?to craft the website > - Define a navigation menu with key areas (about, download, news, > roadmap,?benchmarks,?developement...) > - Visual design > - Code ;.) I would very much like to discuss this part in details with you. If you can make it to pypy sprint, would be awesome, if not, we sh > I can help with some (or all) of?these?tasks. > Another matter are benchmarks. Because it is the project's most visible > "feature" or "result", it would be great to publish a set of benchmarks so > that python users can keep track of performance across different versions > (cpython 2.6 vs?pypy1.1, Jython, etc...). That way they can keep track of > performance improvements as well as decide when it becomes?attractive?for > them to make the switch from cpython. It would be the best advertisement for > the project. The best case would be if you internally perform performance > test to prevent performance regression on new releases, and that same data > could be also be automatically published on the web, in the dev pages > during?development, and .in the "public" pages for final releases. > So the tasks here would be: > - Define a set of standard benchmarks that will serve as performance tests > for every new release (including alphas and betas) > - Create a script that gathers all the data for developers to?analyse?and > spot performance regressions and bugs AND outputs the data in such a way > that it can be automatically published on the website (so no extra > maintenance workload) > - Code the?web page?that beautifully shows the data in a suitable format > (tables, graphs) > I have recently done some work on dynamic javascript (or python) plotting, > so I can take care of the last part relatively easily. I could also help > with the second task. > So I leave it there for you to discuss. What do you think of it all? > Cheers, > Miquel > > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From lac at openend.se Fri Sep 25 11:20:27 2009 From: lac at openend.se (Laura Creighton) Date: Fri, 25 Sep 2009 11:20:27 +0200 Subject: [pypy-dev] new tool I hadn't seen before for realtime collaboration of text documents Message-ID: <200909250920.n8P9KRiQ013960@theraft.openend.se> http://etherpad.com/ Could possibly be useful in the future, especially if we want to collaborate writing docs with people who don't have a codespeak account. Laura From tobami at googlemail.com Fri Sep 25 14:20:54 2009 From: tobami at googlemail.com (Miquel Torres) Date: Fri, 25 Sep 2009 14:20:54 +0200 Subject: [pypy-dev] new tool I hadn't seen before for realtime collaboration of text documents In-Reply-To: <200909250920.n8P9KRiQ013960@theraft.openend.se> References: <200909250920.n8P9KRiQ013960@theraft.openend.se> Message-ID: yep, I find it very useful. You can even create a team account with up to 3 pads you can keep track of. 2009/9/25 Laura Creighton > http://etherpad.com/ > > Could possibly be useful in the future, especially if we want to > collaborate writing docs with people who don't have a codespeak account. 
> > Laura > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > -------------- next part -------------- An HTML attachment was scrubbed... URL: From iko at openend.se Fri Sep 25 17:23:25 2009 From: iko at openend.se (Anders Hammarquist) Date: Fri, 25 Sep 2009 17:23:25 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: Message from fijall of "Thu, 24 Sep 2009 08:38:14 MDT." <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> References: <200909211527.n8LFR4xQ028739@theraft.openend.se><693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> Message-ID: <200909251523.n8PFNP1j002726@fido.openend.se> Since everyone but me seemed to want to take the discussion on IRC instead, I propose we meet up on #pypy-sync on monday at 14:00 /Anders From santagada at gmail.com Fri Sep 25 23:52:44 2009 From: santagada at gmail.com (Leonardo Santagada) Date: Fri, 25 Sep 2009 18:52:44 -0300 Subject: [pypy-dev] benchmarking input In-Reply-To: <200909251523.n8PFNP1j002726@fido.openend.se> References: <200909211527.n8LFR4xQ028739@theraft.openend.se><693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> Message-ID: On Sep 25, 2009, at 12:23 PM, Anders Hammarquist wrote: > Since everyone but me seemed to want to take the discussion on IRC > instead, > I propose we meet up on #pypy-sync on monday at 14:00 > > /Anders I want to know why PyPy doesn't use the unladen swallow benchmarks in complement to the ones already there and maybe reuse and extend their reporting tools. This could make comparing results easier and divide the work of creating comprehensive benchmarks for python. I could wait to ask this monday, but email is interesting in that people can take their time to answer things. -- Leonardo Santagada santagada at gmail.com From andrewfr_ice at yahoo.com Sat Sep 26 00:25:23 2009 From: andrewfr_ice at yahoo.com (Andrew Francis) Date: Fri, 25 Sep 2009 15:25:23 -0700 (PDT) Subject: [pypy-dev] Stackless Python and PyPy Stackless.py Message-ID: <622253.76367.qm@web112410.mail.gq1.yahoo.com> Hi Folks: Again as a part of my Stackless Python talk, I wanted to include a section on the "Future." I assume a part of Stackless Python's future is PyPy? Or am I being presumptuous? Regardless I would like to end the talk with a brief section on PyPy. I noticed the Stackless.py module in lib that contains the Stackless implementation in Python. What I plan to do in my talk is show how a rough approximation of Limbo's alt (selecting the first ready channel from a list) could be implemented. I am a newbie in regards to PyPy. However I have been reading the Stackless documentation. I thought it would be neat if I ended the talk with redoing this, but in PyPy as a part of how one could quickly prototype new Stackless Python features. Any thoughts? Is there anything gotchas? Cheers, Andrew From micahel at gmail.com Sun Sep 27 00:29:44 2009 From: micahel at gmail.com (Michael Hudson-Doyle) Date: Sun, 27 Sep 2009 10:29:44 +1200 Subject: [pypy-dev] Advice for a talk... Message-ID: I'm talking about PyPy at KiwiPyCon in a few weeks: http://nz.pycon.org/talks/talk/3/ My basic plan is to give a similar talk to the one I gave at OSDC last year (http://codespeak.net/svn/pypy/extradoc/talk/osdc2008/osdc08.pdf, which in turn is pretty similar to talks I gave back in 2006...). 
I've not been following recent progress super closely, so if someone who's been more involved could read the OSDC talk and point out things that are no longer true or not really relevant any more, that would be great :) I guess I should say a bit about the current approach to the JIT... Cheers, mwh From fijall at gmail.com Sun Sep 27 09:19:25 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Sun, 27 Sep 2009 01:19:25 -0600 Subject: [pypy-dev] Advice for a talk... In-Reply-To: References: Message-ID: <693bc9ab0909270019l78af72d7od2df0f0e2c395f02@mail.gmail.com> As Jacob pointed, we don't have half-maintained backends any more. You can have a bit more info about the JIT if you like, since this is what we're currently focused on (look at pypy blog for details). The GCs are rather good, there are no plans to improve them in the near future I guess. Impressive demo is to run the gameboy interpreter and play mario (targetgbstandalone I suppose). Cheers, fijal On Sat, Sep 26, 2009 at 4:29 PM, Michael Hudson-Doyle wrote: > I'm talking about PyPy at KiwiPyCon in a few weeks: > http://nz.pycon.org/talks/talk/3/ > > My basic plan is to give a similar talk to the one I gave at OSDC last > year (http://codespeak.net/svn/pypy/extradoc/talk/osdc2008/osdc08.pdf, > which in turn is pretty similar to talks I gave back in 2006...). > I've not been following recent progress super closely, so if someone > who's been more involved could read the OSDC talk and point out things > that are no longer true or not really relevant any more, that would be > great :) ?I guess I should say a bit about the current approach to the > JIT... > > Cheers, > mwh > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From arigo at tunes.org Sun Sep 27 17:47:24 2009 From: arigo at tunes.org (Armin Rigo) Date: Sun, 27 Sep 2009 17:47:24 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: References: <200909251523.n8PFNP1j002726@fido.openend.se> Message-ID: <20090927154724.GA14582@code0.codespeak.net> Hi Leonardo, On Fri, Sep 25, 2009 at 06:52:44PM -0300, Leonardo Santagada wrote: > I want to know why PyPy doesn't use the unladen swallow benchmarks in > complement to the ones already there and maybe reuse and extend their > reporting tools. This could make comparing results easier and divide > the work of creating comprehensive benchmarks for python. A number of benchmarks are not applicable to us, or they are uninteresting at this point (e.g. pickling, regexp, or just microbenchmarks...). That would leave 2 usable benchmarks, at a first glance: 'ai', and possibly 'spitfire/slowspitfire'. (Btw, I wonder why they think that richards is "too artificial" when they include a number of microbenchmarks that look far more artificial to me...) A bientot, Armin. From arigo at tunes.org Sun Sep 27 18:46:02 2009 From: arigo at tunes.org (Armin Rigo) Date: Sun, 27 Sep 2009 18:46:02 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: Message-ID: <20090927164602.GB14582@code0.codespeak.net> Hi Miquel, On Mon, Sep 21, 2009 at 10:35:18PM +0200, Miquel Torres wrote: > Another matter are benchmarks. In addition to the other people's comments, I want to point out that we already have some graphic benchmarks (http://tuatara.cs.uni-duesseldorf.de/plots.html). It's completely custom-made, though; not something nicely designed and generally usable, at least yet. A bientot, Armin. 
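As a concrete starting point for the "store the results, generate the page automatically" idea discussed in this thread, a tiny sketch using SQLite and a static HTML table; the schema, file name and sample values are made up for illustration:

    import sqlite3, time

    SCHEMA = """CREATE TABLE IF NOT EXISTS result (
                    timestamp REAL, revision TEXT, interpreter TEXT,
                    benchmark TEXT, seconds REAL)"""

    def store(db, revision, interpreter, benchmark, seconds):
        db.execute("INSERT INTO result VALUES (?, ?, ?, ?, ?)",
                   (time.time(), revision, interpreter, benchmark, seconds))
        db.commit()

    def render_html(db):
        rows = db.execute("SELECT revision, interpreter, benchmark, seconds"
                          " FROM result ORDER BY timestamp").fetchall()
        out = ["<table>"]
        for revision, interpreter, benchmark, seconds in rows:
            out.append("<tr><td>%s</td><td>%s</td><td>%s</td><td>%.3f</td></tr>"
                       % (revision, interpreter, benchmark, seconds))
        out.append("</table>")
        return "\n".join(out)

    if __name__ == "__main__":
        db = sqlite3.connect("benchmarks.db")
        db.execute(SCHEMA)
        store(db, "r12345", "pypy-c-jit", "richards", 1.234)
        print(render_html(db))

Keeping the raw rows around is what makes later refactorings of the reporting side cheap: the HTML (or plots) can always be regenerated from scratch.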
From arigo at tunes.org Sun Sep 27 19:01:14 2009 From: arigo at tunes.org (Armin Rigo) Date: Sun, 27 Sep 2009 19:01:14 +0200 Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: <622253.76367.qm@web112410.mail.gq1.yahoo.com> References: <622253.76367.qm@web112410.mail.gq1.yahoo.com> Message-ID: <20090927170113.GC14582@code0.codespeak.net> Hi Andrew, On Fri, Sep 25, 2009 at 03:25:23PM -0700, Andrew Francis wrote: > Again as a part of my Stackless Python talk, I wanted to include a > section on the "Future." I assume a part of Stackless Python's future > is PyPy? Or am I being presumptuous? The future is not completely clear. PyPy does contain what is, as far as I know, a full stackless implementation. However it is incompatible with some of the most modern extensions of PyPy (notably the JIT), at least for now; it could be fixed in the future if there is interest. > Regardless I would like to end the talk with a brief section on PyPy. > I noticed the Stackless.py module in lib that contains the Stackless > implementation in Python. Yes, this is an implementation of Stackless Python on top of some lower-level mechanism provided natively in PyPy. It might give good opportunities for changes and experimentation, definitely. However, if you end up needing new features from the lower-level mechanism, you might have to look into 'pypy/module/_stackless' or 'pypy/rlib/coroutine.py'. (Changes in these parts require re-translating the pypy-c you are using, whereas changes in 'pypy/lib/*' do not.) A bientot, Armin. From santagada at gmail.com Mon Sep 28 04:04:26 2009 From: santagada at gmail.com (Leonardo Santagada) Date: Sun, 27 Sep 2009 23:04:26 -0300 Subject: [pypy-dev] benchmarking input In-Reply-To: <20090927154724.GA14582@code0.codespeak.net> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> Message-ID: <24DC8C95-7856-4901-A020-C97264070725@gmail.com> On Sep 27, 2009, at 12:47 PM, Armin Rigo wrote: > Hi Leonardo, > > On Fri, Sep 25, 2009 at 06:52:44PM -0300, Leonardo Santagada wrote: >> I want to know why PyPy doesn't use the unladen swallow benchmarks in >> complement to the ones already there and maybe reuse and extend their >> reporting tools. This could make comparing results easier and divide >> the work of creating comprehensive benchmarks for python. > > A number of benchmarks are not applicable to us, or they are > uninteresting at this point (e.g. pickling, regexp, or just > microbenchmarks...). Uninteresting for benchmarking the jit, but important for python users. > That would leave 2 usable benchmarks, at a first glance: 'ai', and > possibly 'spitfire/slowspitfire'. The django one is also interesting. > (Btw, I wonder why they think that richards is "too artificial" when > they include a number of microbenchmarks that look far more artificial > to me...) I thought that too... maybe just adding richards is okay, they can discard the results if they want. I think that talking to them and adding to their benchmarks. Maybe creating a python benchmark project on google to be moved together with the stdlib separation to python.org is a good idea to bring the community together. Using the same benchmark framework could help both pypy (they already process the benchmarks and do a form of reporting) and unladden swallow (probably all the benchmarks that pypy adds can show possible problems for their jit). If you would like to try this course I could talk to the guys there so to make a separate project... 
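Coming back to Andrew's question about prototyping a Limbo-style alt on top of pypy/lib/stackless.py: a rough, untested sketch of the idea, using only the documented Stackless channel API (channel.balance, receive(), schedule()). The alt() helper and the busy-wait loop are ours, purely for illustration:

    import stackless

    def alt(channels):
        # return (channel, value) from the first channel that already has a
        # sender blocked on it; otherwise yield to the scheduler and retry.
        # Note: this busy-waits if nothing ever becomes ready.
        while True:
            for channel in channels:
                if channel.balance > 0:
                    return channel, channel.receive()
            stackless.schedule()

    def producer(channel, value):
        channel.send(value)

    def consumer(channels):
        channel, value = alt(channels)
        print("received %r" % (value,))

    if __name__ == "__main__":
        channels = [stackless.channel() for _ in range(3)]
        stackless.tasklet(consumer)(channels)
        stackless.tasklet(producer)(channels[1], "hello")
        stackless.run()

A real alt would of course block instead of polling, which is where the lower-level hooks Armin mentions (pypy/module/_stackless, pypy/rlib/coroutine.py) would come in.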
maybe even start sharing stdlib tests like you talked about on pycon 09. -- Leonardo Santagada santagada at gmail.com From benjamin at python.org Mon Sep 28 04:49:28 2009 From: benjamin at python.org (Benjamin Peterson) Date: Sun, 27 Sep 2009 21:49:28 -0500 Subject: [pypy-dev] benchmarking input In-Reply-To: <24DC8C95-7856-4901-A020-C97264070725@gmail.com> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> Message-ID: <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> 2009/9/27 Leonardo Santagada : > > On Sep 27, 2009, at 12:47 PM, Armin Rigo wrote: > >> Hi Leonardo, >> >> On Fri, Sep 25, 2009 at 06:52:44PM -0300, Leonardo Santagada wrote: >>> I want to know why PyPy doesn't use the unladen swallow benchmarks in >>> complement to the ones already there and maybe reuse and extend their >>> reporting tools. This could make comparing results easier and divide >>> the work of creating comprehensive benchmarks for python. >> >> A number of benchmarks are not applicable to us, or they are >> uninteresting at this point (e.g. pickling, regexp, or just >> microbenchmarks...). > > Uninteresting for benchmarking the jit, but important for python users. Well, at the moment it's not actually possible to use the unladen swallow benchmarks because the JIT has no thread support. -- Regards, Benjamin From santagada at gmail.com Mon Sep 28 06:40:15 2009 From: santagada at gmail.com (Leonardo Santagada) Date: Mon, 28 Sep 2009 01:40:15 -0300 Subject: [pypy-dev] benchmarking input In-Reply-To: <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> Message-ID: <269959B0-8E62-4D23-B3E1-000E1BFAFD0F@gmail.com> On Sep 27, 2009, at 11:49 PM, Benjamin Peterson wrote: > 2009/9/27 Leonardo Santagada : >> >> On Sep 27, 2009, at 12:47 PM, Armin Rigo wrote: >> >>> Hi Leonardo, >>> >>> On Fri, Sep 25, 2009 at 06:52:44PM -0300, Leonardo Santagada wrote: >>>> I want to know why PyPy doesn't use the unladen swallow >>>> benchmarks in >>>> complement to the ones already there and maybe reuse and extend >>>> their >>>> reporting tools. This could make comparing results easier and >>>> divide >>>> the work of creating comprehensive benchmarks for python. >>> >>> A number of benchmarks are not applicable to us, or they are >>> uninteresting at this point (e.g. pickling, regexp, or just >>> microbenchmarks...). >> >> Uninteresting for benchmarking the jit, but important for python >> users. > > Well, at the moment it's not actually possible to use the unladen > swallow benchmarks because the JIT has no thread support. just for the record threads are being used for memory benchmarking, and I think that could be made using processes. 
-- Leonardo Santagada santagada at gmail.com From fijall at gmail.com Mon Sep 28 09:48:26 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Mon, 28 Sep 2009 01:48:26 -0600 Subject: [pypy-dev] benchmarking input In-Reply-To: <24DC8C95-7856-4901-A020-C97264070725@gmail.com> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> Message-ID: <693bc9ab0909280048l1e5eac39scf4adf90eb6fa674@mail.gmail.com> >> >> A number of benchmarks are not applicable to us, or they are >> uninteresting at this point (e.g. pickling, regexp, or just >> microbenchmarks...). > > Uninteresting for benchmarking the jit, but important for python users. > And benchmarking the jit is what we're actually doing. >> That would leave 2 usable benchmarks, at a first glance: 'ai', and >> possibly 'spitfire/slowspitfire'. > > The django one is also interesting. This is rather dummy loop for template generation, rather than "django". You can probably reduce it to something as advanced as dictionary lookup + string concatenation in a loop. > >> (Btw, I wonder why they think that richards is "too artificial" when >> they include a number of microbenchmarks that look far more artificial >> to me...) > > I thought that too... maybe just adding richards is okay, they can > discard the results if they want. > I think richards does not reflect what they do at google (like pickling :-) From pommereau at univ-paris12.fr Mon Sep 28 09:31:20 2009 From: pommereau at univ-paris12.fr (Franck Pommereau) Date: Mon, 28 Sep 2009 09:31:20 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <200909211527.n8LFR4xQ028739@theraft.openend.se> References: <200909211527.n8LFR4xQ028739@theraft.openend.se> Message-ID: <4AC06648.8010100@univ-paris12.fr> > I need some input for the benchmarking infrastructure. I'm nearly > at the point where I need to have some place to run it before > continuing (i.e. I need to try and use it, not just speculate). This is not an answer to your question but might interest you. We it comes to benchmarking, the following paper is certainly worth reading: http://www-plan.cs.colorado.edu/diwan/asplos09.pdf It explains how many benchmarks are biased and result in faulty measures (in important proportions), and how this can be compensated. Cheers, Franck From cfbolz at gmx.de Mon Sep 28 10:58:23 2009 From: cfbolz at gmx.de (Carl Friedrich) Date: Mon, 28 Sep 2009 10:58:23 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <4AC06648.8010100@univ-paris12.fr> References: <200909211527.n8LFR4xQ028739@theraft.openend.se> <4AC06648.8010100@univ-paris12.fr> Message-ID: <4AC07AAF.4030707@gmx.de> Franck Pommereau wrote: >> I need some input for the benchmarking infrastructure. I'm nearly >> at the point where I need to have some place to run it before >> continuing (i.e. I need to try and use it, not just speculate). > > This is not an answer to your question but might interest you. > > We it comes to benchmarking, the following paper is certainly worth > reading: http://www-plan.cs.colorado.edu/diwan/asplos09.pdf > It explains how many benchmarks are biased and result in faulty measures > (in important proportions), and how this can be compensated. Seconded. This is an excellent paper. Btw, issues like this might be a reason to really reuse parts of the unladen-swallow benchmark runner, since they seem to have put work into doing the right thing from a statistics point of view. 
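For reference, the statistics side is not much code: repeat each benchmark, discard warm-up iterations (which matters a great deal once a JIT is involved) and report a mean with a standard deviation rather than a single number. A small self-contained sketch, with a stand-in workload:

    import math, time

    def time_once(func):
        start = time.time()
        func()
        return time.time() - start

    def measure(func, warmup=3, repetitions=10):
        for _ in range(warmup):        # discard warm-up runs (JIT, caches)
            time_once(func)
        times = [time_once(func) for _ in range(repetitions)]
        mean = sum(times) / len(times)
        variance = sum((t - mean) ** 2 for t in times) / (len(times) - 1)
        return mean, math.sqrt(variance)

    if __name__ == "__main__":
        def workload():                # stand-in for a real benchmark
            sum(i * i for i in range(100000))
        mean, stddev = measure(workload)
        print("%.4f s +/- %.4f s" % (mean, stddev))

(The paper above goes further, e.g. randomizing the experimental setup; this only covers the "never trust a single run" part.)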
Cheers, Carl Friedrich From tobami at googlemail.com Mon Sep 28 11:22:35 2009 From: tobami at googlemail.com (Miquel Torres) Date: Mon, 28 Sep 2009 11:22:35 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> Message-ID: Hi fijal, >I would go for www.pypy.org website, dedicated for potential users I would also go for a separate pypy.org web. >We have recently started some effort here: >http://codespeak.net/svn/pypy/build/benchmark/ That is nice. One comment: in the future pypy will also need to be benchmarked on another operating system other than Linux I assume?. Shouldn't the benchmark infrastructure code be cross-platform from the beginning? I guess it is still a work in progress, but I had a brief look at the code, and interpreters.py uses os.fork(), for example. >Where would you start? >Personally, I thinks it even makes sense to do some visual >design of a web page as a starting point. You mean visually designing the web page first?. Why not. Only, we can surely design a nice css template and all, but it would be even nicer if a graphic designer would collaborate. And the question remains whether to hand code the web page or use a CMS, web framework, etc. It also depends on how you want to integrate benchmark data. We should definitely have a chat "meeting", with those interested in an end user's web page. Time? Miquel 2009/9/24 Maciej Fijalkowski > Hi. > > First of all, thanks a lot for your interest. In general, my ideas are very > much in line with what you said. I'll try to answer specific questions > one by one. > > > [snip] > > current http://codespeak.net/pypy/dist/pypy/doc/index.html site for > > developers and develop a new www.pypy.org site, or improve the current > one. > > I would go for www.pypy.org website, dedicated for potential users > and to keep current website under current address for potential developers. > > > The tasks to perform would be: > > - Agree on a new website or keeping and improving the current one > > - Choose a CMS (or hand-code or whatever) to craft the website > > - Define a navigation menu with key areas (about, download, news, > > roadmap, benchmarks, developement...) > > - Visual design > > - Code ;.) > > I would very much like to discuss this part in details with you. > If you can make it to pypy sprint, would be awesome, if not, > we sh > > > I can help with some (or all) of these tasks. > > Another matter are benchmarks. Because it is the project's most visible > > "feature" or "result", it would be great to publish a set of benchmarks > so > > that python users can keep track of performance across different versions > > (cpython 2.6 vs pypy1.1, Jython, etc...). That way they can keep track of > > performance improvements as well as decide when it becomes attractive for > > them to make the switch from cpython. It would be the best advertisement > for > > the project. The best case would be if you internally perform performance > > test to prevent performance regression on new releases, and that same > data > > could be also be automatically published on the web, in the dev pages > > during development, and .in the "public" pages for final releases. 
> > So the tasks here would be: > > - Define a set of standard benchmarks that will serve as performance > tests > > for every new release (including alphas and betas) > > - Create a script that gathers all the data for developers to analyse and > > spot performance regressions and bugs AND outputs the data in such a way > > that it can be automatically published on the website (so no extra > > maintenance workload) > > - Code the web page that beautifully shows the data in a suitable format > > (tables, graphs) > > I have recently done some work on dynamic javascript (or python) > plotting, > > so I can take care of the last part relatively easily. I could also help > > with the second task. > > So I leave it there for you to discuss. What do you think of it all? > > Cheers, > > Miquel > > > > _______________________________________________ > > pypy-dev at codespeak.net > > http://codespeak.net/mailman/listinfo/pypy-dev > > > -------------- next part -------------- An HTML attachment was scrubbed... URL: From tobami at googlemail.com Mon Sep 28 11:28:02 2009 From: tobami at googlemail.com (Miquel Torres) Date: Mon, 28 Sep 2009 11:28:02 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: <20090927164602.GB14582@code0.codespeak.net> References: <20090927164602.GB14582@code0.codespeak.net> Message-ID: Hi Armin, >we already have some graphic benchmarks >(http://tuatara.cs.uni-duesseldorf.de/plots.html). Thanks for the pointer, I hadn't seen that. I guess the benchmark infrastructure should be completed first, and then the integration with a benchmarks web page can be made. If you store benchmark results in a database, a full-blown benchmarks page that is fully configurable is not difficult to code (I mean select interpreters, architecture and benchmark and display it). Cheers, Miquel 2009/9/27 Armin Rigo > Hi Miquel, > > On Mon, Sep 21, 2009 at 10:35:18PM +0200, Miquel Torres wrote: > > Another matter are benchmarks. > > In addition to the other people's comments, I want to > point out that we already have some graphic benchmarks > (http://tuatara.cs.uni-duesseldorf.de/plots.html). > It's completely custom-made, though; not something nicely > designed and generally usable, at least yet. > > > A bientot, > > Armin. > -------------- next part -------------- An HTML attachment was scrubbed... URL: From fijall at gmail.com Mon Sep 28 11:29:47 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Mon, 28 Sep 2009 03:29:47 -0600 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> Message-ID: <693bc9ab0909280229tc214e1dq9ffb3ffffc844956@mail.gmail.com> > > We should definitely have a chat "meeting", with those interested in an end > user's web page. > Time? > Most of people are in european timezone, so please suggest time according to that. I'm, personally, most of the time just on #pypy on freenode irc. Also, who wants to participate in such a meeting? Cheers, fijal From tobami at googlemail.com Mon Sep 28 12:32:54 2009 From: tobami at googlemail.com (Miquel Torres) Date: Mon, 28 Sep 2009 12:32:54 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: <693bc9ab0909280229tc214e1dq9ffb3ffffc844956@mail.gmail.com> References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> <693bc9ab0909280229tc214e1dq9ffb3ffffc844956@mail.gmail.com> Message-ID: I'm personally available almost any evening from 22:00 to 00:00 (Berlin-Paris time). 
Alternatively from 9:30 to 11:30. Miquel 2009/9/28 Maciej Fijalkowski > > > > We should definitely have a chat "meeting", with those interested in an > end > > user's web page. > > Time? > > > > Most of people are in european timezone, so please suggest time > according to that. > I'm, personally, most of the time just on #pypy on freenode irc. > > Also, who wants to participate in such a meeting? > > Cheers, > fijal > -------------- next part -------------- An HTML attachment was scrubbed... URL: From fijall at gmail.com Mon Sep 28 12:53:37 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Mon, 28 Sep 2009 04:53:37 -0600 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> <693bc9ab0909280229tc214e1dq9ffb3ffffc844956@mail.gmail.com> Message-ID: <693bc9ab0909280353u6902fcd7hf155d7afbba85736@mail.gmail.com> Fine, what about you show up tomorrow morning we chat a bit and we can see who shows up? On Mon, Sep 28, 2009 at 4:32 AM, Miquel Torres wrote: > I'm personally available almost any evening from 22:00 to 00:00 > (Berlin-Paris time). > Alternatively from 9:30 to 11:30. > > Miquel > > > > 2009/9/28 Maciej Fijalkowski >> >> > >> > We should definitely have a chat "meeting", with those interested in an >> > end >> > user's web page. >> > Time? >> > >> >> Most of people are in european timezone, so please suggest time >> according to that. >> I'm, personally, most of the time just on #pypy on freenode irc. >> >> Also, who wants to participate in such a meeting? >> >> Cheers, >> fijal > > From tobami at googlemail.com Mon Sep 28 13:04:53 2009 From: tobami at googlemail.com (Miquel Torres) Date: Mon, 28 Sep 2009 13:04:53 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: <693bc9ab0909280353u6902fcd7hf155d7afbba85736@mail.gmail.com> References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> <693bc9ab0909280229tc214e1dq9ffb3ffffc844956@mail.gmail.com> <693bc9ab0909280353u6902fcd7hf155d7afbba85736@mail.gmail.com> Message-ID: All right. I'll be there. 2009/9/28 Maciej Fijalkowski > Fine, what about you show up tomorrow morning we chat a bit > and we can see who shows up? > > On Mon, Sep 28, 2009 at 4:32 AM, Miquel Torres > wrote: > > I'm personally available almost any evening from 22:00 to 00:00 > > (Berlin-Paris time). > > Alternatively from 9:30 to 11:30. > > > > Miquel > > > > > > > > 2009/9/28 Maciej Fijalkowski > >> > >> > > >> > We should definitely have a chat "meeting", with those interested in > an > >> > end > >> > user's web page. > >> > Time? > >> > > >> > >> Most of people are in european timezone, so please suggest time > >> according to that. > >> I'm, personally, most of the time just on #pypy on freenode irc. > >> > >> Also, who wants to participate in such a meeting? > >> > >> Cheers, > >> fijal > > > > > -------------- next part -------------- An HTML attachment was scrubbed... 
URL: From arigo at tunes.org Mon Sep 28 13:46:46 2009 From: arigo at tunes.org (Armin Rigo) Date: Mon, 28 Sep 2009 13:46:46 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> Message-ID: <20090928114646.GA6023@code0.codespeak.net> Hi Benjamin, On Sun, Sep 27, 2009 at 09:49:28PM -0500, Benjamin Peterson wrote: > Well, at the moment it's not actually possible to use the unladen > swallow benchmarks because the JIT has no thread support. That can be a confusing statement. I added thread support to asmgcc a short while ago, so our JIT "mostly" supports threads, where "mostly" is defined as "yes it should support threads but we never really tried". A bientot, Armin. From lac at openend.se Mon Sep 28 14:23:16 2009 From: lac at openend.se (Laura Creighton) Date: Mon, 28 Sep 2009 14:23:16 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: Message from Miquel Torres of "Mon, 28 Sep 2009 11:22:35 +0200." References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> Message-ID: <200909281223.n8SCNGiv005067@theraft.openend.se> In a message of Mon, 28 Sep 2009 11:22:35 +0200, Miquel Torres writes: >We should definitely have a chat "meeting", with those interested in an e >nd >user's web page. >Time? > >Miquel I am interested and I am pretty flexible about time, though _not this week, please_ (new product release, unrelated to PyPy on Thursday). Laura From lac at openend.se Mon Sep 28 14:39:56 2009 From: lac at openend.se (Laura Creighton) Date: Mon, 28 Sep 2009 14:39:56 +0200 Subject: [pypy-dev] Improving the pypy website (and benchmarks) In-Reply-To: Message from Laura Creighton of "Mon, 28 Sep 2009 14:23:16 +0200." <200909281223.n8SCNGiv005067@theraft.openend.se> References: <693bc9ab0909240939g793a41f4j50ace3e6021955c9@mail.gmail.com> <200909281223.n8SCNGiv005067@theraft.openend.se> Message-ID: <200909281239.n8SCduiU005849@theraft.openend.se> In a message of Mon, 28 Sep 2009 14:23:16 +0200, Laura Creighton writes: >In a message of Mon, 28 Sep 2009 11:22:35 +0200, Miquel Torres writes: > >>We should definitely have a chat "meeting", with those interested in an >e >>nd >>user's web page. >>Time? >> >>Miquel > >I am interested and I am pretty flexible about time, though >_not this week, please_ (new product release, unrelated to >PyPy on Thursday). > >Laura Well, if tomorrow is what people want, I can sort of be there at 9.30 tomorrow. Bea cannot, she is teaching in Stockholm at the time. And I expect to be way distracted by other things tomorrow. Laura From glavoie at gmail.com Mon Sep 28 15:04:42 2009 From: glavoie at gmail.com (Gabriel Lavoie) Date: Mon, 28 Sep 2009 09:04:42 -0400 Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: <622253.76367.qm@web112410.mail.gq1.yahoo.com> References: <622253.76367.qm@web112410.mail.gq1.yahoo.com> Message-ID: Hello Andrew, I'm currently experimenting with PyPy's implementation of Stackless to add new features for a university master degree project. I chose PyPy's implementation because it's easier to play with Python code than with C code. Also, since PyPy is "still experimental", it was the best implementation to choose to hack with and I don't regret my choice. 
What I'm trying to achieve is to add distributed features to Stackless: - Local and networked channels with automatic switch between both - Easy tasklet migration to a remote host, keeping the channel connections between tasklets. - Transparent/automatic dependencies migration when a tasklet is sent to a remote host. Most of the features are done and I'm currently working on the dependencies migration. The only bad part is that I'm doing this project part time since I have a full time job but I have to complete the programming part in the next two months (I've been working for too long on this). If you're interested to see what I've done, just ask! :) See ya, Gabriel 2009/9/25 Andrew Francis : > Hi Folks: > > Again as a part of my Stackless Python talk, I wanted to include a section on the "Future." I assume a part of Stackless Python's future is PyPy? Or am I being presumptuous? > > Regardless I would like to end the talk with a brief section on PyPy. I noticed the Stackless.py module in lib that contains the Stackless implementation in Python. > > What I plan to do in my talk is show how a rough approximation of Limbo's alt (selecting the first ready channel from a list) could be implemented. > > I am a newbie in regards to PyPy. However I have been reading the Stackless documentation. I thought it would be neat if I ended the talk with redoing this, but in PyPy as a part of how one could quickly prototype new Stackless Python features. Any thoughts? Is there anything gotchas? > > Cheers, > Andrew > > > > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > -- Gabriel Lavoie glavoie at gmail.com From benjamin at python.org Mon Sep 28 16:19:18 2009 From: benjamin at python.org (Benjamin Peterson) Date: Mon, 28 Sep 2009 09:19:18 -0500 Subject: [pypy-dev] benchmarking input In-Reply-To: <20090928114646.GA6023@code0.codespeak.net> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> <1afaf6160909271949t7962c23co2dc67c0da41c2e61@mail.gmail.com> <20090928114646.GA6023@code0.codespeak.net> Message-ID: <1afaf6160909280719n7c40cc8ft6a113bbecbc33847@mail.gmail.com> 2009/9/28 Armin Rigo : > Hi Benjamin, > > On Sun, Sep 27, 2009 at 09:49:28PM -0500, Benjamin Peterson wrote: >> Well, at the moment it's not actually possible to use the unladen >> swallow benchmarks because the JIT has no thread support. > > That can be a confusing statement. ?I added thread support to asmgcc a > short while ago, so our JIT "mostly" supports threads, where "mostly" is > defined as "yes it should support threads but we never really tried". Well, -Ojit disables the thread module, which I would classify as "not supporting threads with the JIT yet". -- Regards, Benjamin From arigo at tunes.org Mon Sep 28 16:30:09 2009 From: arigo at tunes.org (Armin Rigo) Date: Mon, 28 Sep 2009 16:30:09 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <200909251523.n8PFNP1j002726@fido.openend.se> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> Message-ID: <20090928143009.GA19399@code0.codespeak.net> Hi, On Fri, Sep 25, 2009 at 05:23:25PM +0200, Anders Hammarquist wrote: > Since everyone but me seemed to want to take the discussion on IRC instead, > I propose we meet up on #pypy-sync on monday at 14:00 Outcome of this meeting: as a first step, we will do a system that runs nightly. 
It does a build of a pypy-c-jit, runs richards (only) and stores the result in a database format. Then it extracts the results from the database and write them as a static text or html file, for a web server. We will then see where to go from there. I think it's important to store the results in a DB instead of just as a static text or html file, for future refactorings; this issue blocked us when developing the tuatara benchmarks. A bientot, Armin. From holger at merlinux.eu Mon Sep 28 16:38:43 2009 From: holger at merlinux.eu (holger krekel) Date: Mon, 28 Sep 2009 16:38:43 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <20090928143009.GA19399@code0.codespeak.net> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> <20090928143009.GA19399@code0.codespeak.net> Message-ID: <20090928143843.GM15455@trillke.net> Hi Armin, On Mon, Sep 28, 2009 at 16:30 +0200, Armin Rigo wrote: > Hi, > > On Fri, Sep 25, 2009 at 05:23:25PM +0200, Anders Hammarquist wrote: > > Since everyone but me seemed to want to take the discussion on IRC instead, > > I propose we meet up on #pypy-sync on monday at 14:00 > > Outcome of this meeting: as a first step, we will do a system that runs > nightly. It does a build of a pypy-c-jit, runs richards (only) and > stores the result in a database format. Then it extracts the results > from the database and write them as a static text or html file, for a > web server. > > We will then see where to go from there. I think it's important to > store the results in a DB instead of just as a static text or html file, > for future refactorings; this issue blocked us when developing the > tuatara benchmarks. why is a DB table format better for refactoring than a text file? holger From lac at openend.se Mon Sep 28 20:46:42 2009 From: lac at openend.se (Laura Creighton) Date: Mon, 28 Sep 2009 20:46:42 +0200 Subject: [pypy-dev] has anybody submitted an abstract to PyCON USA? Message-ID: <200909281846.n8SIkgb0030377@theraft.openend.se> Time is running out.... Laura From fijall at gmail.com Mon Sep 28 20:57:27 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Mon, 28 Sep 2009 12:57:27 -0600 Subject: [pypy-dev] has anybody submitted an abstract to PyCON USA? In-Reply-To: <200909281846.n8SIkgb0030377@theraft.openend.se> References: <200909281846.n8SIkgb0030377@theraft.openend.se> Message-ID: <693bc9ab0909281157s4f4bba9bkbeeeef55f99d53f1@mail.gmail.com> I did not so far, but I plan to. On Mon, Sep 28, 2009 at 12:46 PM, Laura Creighton wrote: > Time is running out.... > > Laura > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From andrewfr_ice at yahoo.com Tue Sep 29 00:12:58 2009 From: andrewfr_ice at yahoo.com (Andrew Francis) Date: Mon, 28 Sep 2009 15:12:58 -0700 (PDT) Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: Message-ID: <15650.42251.qm@web112415.mail.gq1.yahoo.com> Hi Gabriel: I believe you are in Montreal? And yes I would be interested in knowing about your work! A quick note - I will be giving a talk on Stackless on Wednesday. The talk may be too elementary for you. http://montrealpython.org/ And yes I am very interested in using PyPy for experimenting with new Stackless features. In my case, I am chiefly interested in workflow constructs. So stuff like the Limbo alt statement, join/wait, event handlers. 
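For illustration only, a rough sketch of the kind of alt being discussed, written against the standard stackless channel API that PyPy's Stackless.py also exposes (channel.balance > 0 means a sender is already blocked on the channel); it busy-polls via stackless.schedule() just to show the interface, which is not how a real alt would be wired into the scheduler:

import stackless

def alt(channels):
    # rough approximation of Limbo's alt: return (channel, value) for the
    # first channel that already has a sender waiting, otherwise yield to
    # the scheduler and poll again
    while True:
        for ch in channels:
            if ch.balance > 0:      # a sender is blocked on this channel
                return ch, ch.receive()
        stackless.schedule()

def producer(ch, value):
    ch.send(value)

def consumer(channels):
    ch, value = alt(channels)
    print "alt picked a channel, got", value

ch1 = stackless.channel()
ch2 = stackless.channel()
stackless.tasklet(consumer)([ch1, ch2])
stackless.tasklet(producer)(ch2, 42)
stackless.run()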
What particularly interests me is prototyping the features in PyPy to get the behaviour and interface correct. Then moving the features to Stackless. Of course, this would require me to improve my knowledge of Stackless internals. In terms of pickling (I am assuming you are delving into this for process migration), I have done some work with swapping. My experience so far has been to do swapping, one needs to introduce some notion of 'process.' Cheers, Andrew --- On Mon, 9/28/09, Gabriel Lavoie wrote: > From: Gabriel Lavoie > Subject: Re: [pypy-dev] Stackless Python and PyPy Stackless.py > To: "Andrew Francis" > Cc: stackless at stackless.com, pypy-dev at codespeak.net, tismer at stackless.com > Date: Monday, September 28, 2009, 6:04 AM > Hello Andrew, > I'm currently experimenting with > PyPy's implementation of > Stackless to add new features for a university master > degree project. > I chose PyPy's implementation because it's easier to play > with Python > code than with C code. Also, since PyPy is "still > experimental", it > was the best implementation to choose to hack with and I > don't regret > my choice. What I'm trying to achieve is to add distributed > features > to Stackless: > > - Local and networked channels with automatic switch > between both > - Easy tasklet migration to a remote host, keeping the > channel > connections between tasklets. > - Transparent/automatic dependencies migration when a > tasklet is sent > to a remote host. > > Most of the features are done and I'm currently working on > the > dependencies migration. The only bad part is that I'm doing > this > project part time since I have a full time job but I have > to complete > the programming part in the next two months (I've been > working for too > long on this). > > If you're interested to see what I've done, just ask! :) > > See ya, > > Gabriel > > 2009/9/25 Andrew Francis : > > Hi Folks: > > > > Again as a part of my Stackless Python talk, I wanted > to include a section on the "Future." I assume a part of > Stackless Python's future is PyPy? Or am I being > presumptuous? > > > > Regardless I would like to end the talk with a brief > section on PyPy. I noticed the Stackless.py module in lib > that contains the Stackless implementation in Python. > > > > What I plan to do in my talk is show how a rough > approximation of Limbo's alt (selecting the first ready > channel from a list) could be implemented. > > > > I am a newbie in regards to PyPy. However I have been > reading the Stackless documentation. I thought it would be > neat if I ended the talk with redoing this, but in PyPy as a > part of how one could quickly prototype new Stackless Python > features. Any thoughts? Is there anything gotchas? > > > > Cheers, > > Andrew > > > > > > > > _______________________________________________ > > pypy-dev at codespeak.net > > http://codespeak.net/mailman/listinfo/pypy-dev > > > > > > -- > Gabriel Lavoie > glavoie at gmail.com > From glavoie at gmail.com Tue Sep 29 01:14:57 2009 From: glavoie at gmail.com (Gabriel Lavoie) Date: Mon, 28 Sep 2009 19:14:57 -0400 Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: <3D291582-7965-4F9E-933E-FA520540514D@gmail.com> References: <622253.76367.qm@web112410.mail.gq1.yahoo.com> <3D291582-7965-4F9E-933E-FA520540514D@gmail.com> Message-ID: 2009/9/28 Leonardo Santagada : > I am very interested, I thought to do something like it. Where could I see > your code? > > On Sep 28, 2009, at 10:04 AM, Gabriel Lavoie wrote: > >> Hello Andrew, >> ? 
?I'm currently experimenting with PyPy's implementation of >> Stackless to add new features for a university master degree project. >> I chose PyPy's implementation because it's easier to play with Python >> code than with C code. Also, since PyPy is "still experimental", it >> was the best implementation to choose to hack with and I don't regret >> my choice. What I'm trying to achieve is to add distributed features >> to Stackless: >> >> - Local and networked channels with automatic switch between both >> - Easy tasklet migration to a remote host, keeping the channel >> connections between tasklets. >> - Transparent/automatic dependencies migration when a tasklet is sent >> to a remote host. >> >> Most of the features are done and I'm currently working on the >> dependencies migration. The only bad part is that I'm doing this >> project part time since I have a full time job but I have to complete >> the programming part in the next two months (I've been working for too >> long on this). >> >> If you're interested to see what I've done, just ask! :) >> >> See ya, >> >> Gabriel >> >> 2009/9/25 Andrew Francis : >>> >>> Hi Folks: >>> >>> Again as a part of my Stackless Python talk, I wanted to include a >>> section on the "Future." I assume a part of Stackless Python's future is >>> PyPy? Or am I being presumptuous? >>> >>> Regardless I would like to end the talk with a brief section on PyPy. I >>> noticed the Stackless.py module in lib that contains the Stackless >>> implementation in Python. >>> >>> What I plan to do in my talk is show how a rough approximation of Limbo's >>> alt (selecting the first ready channel from a list) could be implemented. >>> >>> I am a newbie in regards to PyPy. However I have been reading the >>> Stackless documentation. I thought it would be neat if I ended the talk with >>> redoing this, but in PyPy as a part of how one could quickly prototype new >>> Stackless Python features. Any thoughts? Is there anything gotchas? >>> >>> Cheers, >>> Andrew >>> >>> >>> >>> _______________________________________________ >>> pypy-dev at codespeak.net >>> http://codespeak.net/mailman/listinfo/pypy-dev >>> >> >> >> >> -- >> Gabriel Lavoie >> glavoie at gmail.com >> _______________________________________________ >> pypy-dev at codespeak.net >> http://codespeak.net/mailman/listinfo/pypy-dev > > -- > Leonardo Santagada > santagada at gmail.com > > > > I still haven't shown publicly my work. I'll try to quickly prepare something this week with my current test code samples and a quick description of the API. I think the code quality is pretty bad as this is my first real Python project. My priority is to have something that works before doing a big cleanup. Gabriel -- Gabriel Lavoie glavoie at gmail.com From tobami at googlemail.com Tue Sep 29 11:31:21 2009 From: tobami at googlemail.com (Miquel Torres) Date: Tue, 29 Sep 2009 11:31:21 +0200 Subject: [pypy-dev] Nightly Benchmarks Message-ID: Hi iko, we just had a #pypy discussion about setting up a benchmarks web page and I was told that you already have started some work in that direction, so it would be great to coordinate each other because I intend to implement the visualization part. (to all others that attended the chat, feel free to correct me on any point I do not correctly adress) (and to all others that did NOT attend, feel free to make suggestions :) We agreed on a Django DB backend, and plot rendering by the browser, so we need to talk about the way to store the nightly benchmark data into the DB. 
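As an illustration only (the model and field names below are invented, not an agreed design), the schema could be as small as one table for the builds being measured and one for the individual benchmark numbers; the browser-side plots then only need to fetch Result rows:

from django.db import models

class Revision(models.Model):
    # one row per nightly build (or per interpreter we compare against)
    number = models.IntegerField()                 # e.g. svn revision
    interpreter = models.CharField(max_length=50)  # "pypy-c-jit", "cpython", ...
    date = models.DateTimeField()

class Result(models.Model):
    # one row per (benchmark, revision) measurement
    revision = models.ForeignKey(Revision)
    benchmark = models.CharField(max_length=100)   # e.g. "richards"
    total_time = models.FloatField()               # seconds, averaged over runs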
The end goal is to build a speed.pypy.org where you can compare all kinds of pypy versions to cpython and other python interpreters and other languages in a similar vein to http://shootout.alioth.debian.org/u32q/benchmark.php?test=all&lang=java&lang2=python&box=1 (but not a copy of it). As a first step we should address the main interest right now: pypy performance evolution through trunk revisions. Can you please explain to me what you are doing in that area now? Best regards, Miquel -------------- next part -------------- An HTML attachment was scrubbed... URL: From renesd at gmail.com Tue Sep 29 12:58:19 2009 From: renesd at gmail.com (=?ISO-8859-1?Q?Ren=E9_Dudfield?=) Date: Tue, 29 Sep 2009 11:58:19 +0100 Subject: [pypy-dev] benchmarking input In-Reply-To: <693bc9ab0909280048l1e5eac39scf4adf90eb6fa674@mail.gmail.com> References: <200909251523.n8PFNP1j002726@fido.openend.se> <20090927154724.GA14582@code0.codespeak.net> <24DC8C95-7856-4901-A020-C97264070725@gmail.com> <693bc9ab0909280048l1e5eac39scf4adf90eb6fa674@mail.gmail.com> Message-ID: <64ddb72c0909290358l153d8724g373cde71b2e71f16@mail.gmail.com> On Mon, Sep 28, 2009 at 8:48 AM, Maciej Fijalkowski wrote: > > I think richards does not reflect what they do at google (like pickling :-) > Hi, yeah, their project goals are to speed up things that are interesting for them. Not to speed up things in general - but to speed up google projects running on python. I.e., they want to speed up their own code mostly. They also run a number of python projects' test suites, and use them to benchmark too. I imagine they benchmark google apps internally too. 
> > their recent talk describes their recent work - and also the work of > speedups like the wpython project: > ??? http://unladen-swallow.googlecode.com/files/Unladen_Swallow_PyCon.pdf > > wpython: > ? ? http://code.google.com/p/wpython/ > > http://wpython.googlecode.com/files/Beyond%20Bytecode%20-%20A%20Wordcode-based%20Python.pdf thanks for links. > > > The programming language shootout tests would seem a useful set of > benchmarks to compare against other languages. > ? eg. > http://shootout.alioth.debian.org/u32/benchmark.php?test=all&lang=pypy&lang2=python&box=1 > > Note, they have old pythons there... that is cpython 2.5.2 and pypy 1.1 in > their comparisons. > I have also no clue how they did those benchmarks. I was trying to repeat them, but was unable to get even close to that numbers. Also, I failed at reporting it to them since you need to create an account there and account creation did not work too well... Cheers, fijal From andrewfr_ice at yahoo.com Tue Sep 29 18:34:19 2009 From: andrewfr_ice at yahoo.com (Andrew Francis) Date: Tue, 29 Sep 2009 09:34:19 -0700 (PDT) Subject: [pypy-dev] Question on Hard Switching and Soft Switching Message-ID: <854337.90183.qm@web112418.mail.gq1.yahoo.com> Hi Folks: As a part of an up-coming talk, I have been reading more about that happens under the hood. For instance "The Essentials of Stackless Python." The terms 'hard switching' and 'soft switching' constantly appear. 'Soft' switching is defined as cooperating switching. I will assume this is associated with a 'stackless.schedule()' or a blocked on channel. Hard switching is defined as brute-force. However what is brute-force? Is this when Stackless is in pre-emptive mode and one is relying on the ticks? Cheers, Andrew From glavoie at gmail.com Wed Sep 30 03:20:47 2009 From: glavoie at gmail.com (Gabriel Lavoie) Date: Tue, 29 Sep 2009 21:20:47 -0400 Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: References: <622253.76367.qm@web112410.mail.gq1.yahoo.com> <3D291582-7965-4F9E-933E-FA520540514D@gmail.com> Message-ID: Hello everyone, here is a first look at my work. Comments are welcome! :) http://www.mutehq.net/~wildchild/dstackless.html See ya, Gabriel 2009/9/28 Gabriel Lavoie : > 2009/9/28 Leonardo Santagada : >> I am very interested, I thought to do something like it. Where could I see >> your code? >> >> On Sep 28, 2009, at 10:04 AM, Gabriel Lavoie wrote: >> >>> Hello Andrew, >>> ? ?I'm currently experimenting with PyPy's implementation of >>> Stackless to add new features for a university master degree project. >>> I chose PyPy's implementation because it's easier to play with Python >>> code than with C code. Also, since PyPy is "still experimental", it >>> was the best implementation to choose to hack with and I don't regret >>> my choice. What I'm trying to achieve is to add distributed features >>> to Stackless: >>> >>> - Local and networked channels with automatic switch between both >>> - Easy tasklet migration to a remote host, keeping the channel >>> connections between tasklets. >>> - Transparent/automatic dependencies migration when a tasklet is sent >>> to a remote host. >>> >>> Most of the features are done and I'm currently working on the >>> dependencies migration. The only bad part is that I'm doing this >>> project part time since I have a full time job but I have to complete >>> the programming part in the next two months (I've been working for too >>> long on this). >>> >>> If you're interested to see what I've done, just ask! 
:) >>> >>> See ya, >>> >>> Gabriel >>> >>> 2009/9/25 Andrew Francis : >>>> >>>> Hi Folks: >>>> >>>> Again as a part of my Stackless Python talk, I wanted to include a >>>> section on the "Future." I assume a part of Stackless Python's future is >>>> PyPy? Or am I being presumptuous? >>>> >>>> Regardless I would like to end the talk with a brief section on PyPy. I >>>> noticed the Stackless.py module in lib that contains the Stackless >>>> implementation in Python. >>>> >>>> What I plan to do in my talk is show how a rough approximation of Limbo's >>>> alt (selecting the first ready channel from a list) could be implemented. >>>> >>>> I am a newbie in regards to PyPy. However I have been reading the >>>> Stackless documentation. I thought it would be neat if I ended the talk with >>>> redoing this, but in PyPy as a part of how one could quickly prototype new >>>> Stackless Python features. Any thoughts? Is there anything gotchas? >>>> >>>> Cheers, >>>> Andrew >>>> >>>> >>>> >>>> _______________________________________________ >>>> pypy-dev at codespeak.net >>>> http://codespeak.net/mailman/listinfo/pypy-dev >>>> >>> >>> >>> >>> -- >>> Gabriel Lavoie >>> glavoie at gmail.com >>> _______________________________________________ >>> pypy-dev at codespeak.net >>> http://codespeak.net/mailman/listinfo/pypy-dev >> >> -- >> Leonardo Santagada >> santagada at gmail.com >> >> >> >> > > I still haven't shown publicly my work. I'll try to quickly prepare > something this week with my current test code samples and a quick > description of the API. I think the code quality is pretty bad as this > is my first real Python project. My priority is to have something that > works before doing a big cleanup. > > Gabriel > > -- > Gabriel Lavoie > glavoie at gmail.com > -- Gabriel Lavoie glavoie at gmail.com From holger at merlinux.eu Wed Sep 30 09:59:18 2009 From: holger at merlinux.eu (holger krekel) Date: Wed, 30 Sep 2009 09:59:18 +0200 Subject: [pypy-dev] Stackless Python and PyPy Stackless.py In-Reply-To: References: <622253.76367.qm@web112410.mail.gq1.yahoo.com> <3D291582-7965-4F9E-933E-FA520540514D@gmail.com> Message-ID: <20090930075918.GB15455@trillke.net> Hi Gabriel, very cool. Do you happen to have some conceptual paper/post describing your programming model in more detail? I am asking because i am working on related ideas, i.e. "networked interpreters", see my latest blog post http://tinyurl.com/yco9aua cheers, holger On Tue, Sep 29, 2009 at 21:20 -0400, Gabriel Lavoie wrote: > Hello everyone, > here is a first look at my work. Comments are welcome! :) > > http://www.mutehq.net/~wildchild/dstackless.html > > See ya, > > Gabriel > > 2009/9/28 Gabriel Lavoie : > > 2009/9/28 Leonardo Santagada : > >> I am very interested, I thought to do something like it. Where could I see > >> your code? > >> > >> On Sep 28, 2009, at 10:04 AM, Gabriel Lavoie wrote: > >> > >>> Hello Andrew, > >>> ? ?I'm currently experimenting with PyPy's implementation of > >>> Stackless to add new features for a university master degree project. > >>> I chose PyPy's implementation because it's easier to play with Python > >>> code than with C code. Also, since PyPy is "still experimental", it > >>> was the best implementation to choose to hack with and I don't regret > >>> my choice. 
What I'm trying to achieve is to add distributed features > >>> to Stackless: > >>> > >>> - Local and networked channels with automatic switch between both > >>> - Easy tasklet migration to a remote host, keeping the channel > >>> connections between tasklets. > >>> - Transparent/automatic dependencies migration when a tasklet is sent > >>> to a remote host. > >>> > >>> Most of the features are done and I'm currently working on the > >>> dependencies migration. The only bad part is that I'm doing this > >>> project part time since I have a full time job but I have to complete > >>> the programming part in the next two months (I've been working for too > >>> long on this). > >>> > >>> If you're interested to see what I've done, just ask! :) > >>> > >>> See ya, > >>> > >>> Gabriel > >>> > >>> 2009/9/25 Andrew Francis : > >>>> > >>>> Hi Folks: > >>>> > >>>> Again as a part of my Stackless Python talk, I wanted to include a > >>>> section on the "Future." I assume a part of Stackless Python's future is > >>>> PyPy? Or am I being presumptuous? > >>>> > >>>> Regardless I would like to end the talk with a brief section on PyPy. I > >>>> noticed the Stackless.py module in lib that contains the Stackless > >>>> implementation in Python. > >>>> > >>>> What I plan to do in my talk is show how a rough approximation of Limbo's > >>>> alt (selecting the first ready channel from a list) could be implemented. > >>>> > >>>> I am a newbie in regards to PyPy. However I have been reading the > >>>> Stackless documentation. I thought it would be neat if I ended the talk with > >>>> redoing this, but in PyPy as a part of how one could quickly prototype new > >>>> Stackless Python features. Any thoughts? Is there anything gotchas? > >>>> > >>>> Cheers, > >>>> Andrew > >>>> > >>>> > >>>> > >>>> _______________________________________________ > >>>> pypy-dev at codespeak.net > >>>> http://codespeak.net/mailman/listinfo/pypy-dev > >>>> > >>> > >>> > >>> > >>> -- > >>> Gabriel Lavoie > >>> glavoie at gmail.com > >>> _______________________________________________ > >>> pypy-dev at codespeak.net > >>> http://codespeak.net/mailman/listinfo/pypy-dev > >> > >> -- > >> Leonardo Santagada > >> santagada at gmail.com > >> > >> > >> > >> > > > > I still haven't shown publicly my work. I'll try to quickly prepare > > something this week with my current test code samples and a quick > > description of the API. I think the code quality is pretty bad as this > > is my first real Python project. My priority is to have something that > > works before doing a big cleanup. 
> > > > Gabriel > > > > -- > > Gabriel Lavoie > > glavoie at gmail.com > > > > > > -- > Gabriel Lavoie > glavoie at gmail.com > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev -- Metaprogramming, Python, Testing: http://tetamap.wordpress.com Python, PyPy, pytest contracting: http://merlinux.eu From arigo at tunes.org Wed Sep 30 10:31:26 2009 From: arigo at tunes.org (Armin Rigo) Date: Wed, 30 Sep 2009 10:31:26 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <20090928143843.GM15455@trillke.net> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> <20090928143009.GA19399@code0.codespeak.net> <20090928143843.GM15455@trillke.net> Message-ID: <20090930083126.GA17579@code0.codespeak.net> Hi Holger, On Mon, Sep 28, 2009 at 04:38:43PM +0200, holger krekel wrote: > why is a DB table format better for refactoring than a text file? I suppose it isn't intrinsically. The point is that as far as I know all our previous pypy benchmarks did not even produce a text file containing the results only, in a way that can be nicely re-read by program and refactored. They just produce directly text or html files with the results embedded in whatever presentation was deemed best at the time. More than that I have no preference for a DB format versus an easily-reparsable text file format. A bientot, Armin. From fijall at gmail.com Wed Sep 30 10:46:06 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Wed, 30 Sep 2009 02:46:06 -0600 Subject: [pypy-dev] benchmarking input In-Reply-To: <20090930083126.GA17579@code0.codespeak.net> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> <20090928143009.GA19399@code0.codespeak.net> <20090928143843.GM15455@trillke.net> <20090930083126.GA17579@code0.codespeak.net> Message-ID: <693bc9ab0909300146j5e4b123ifa1be2799e0b1731@mail.gmail.com> Personally I think the best reason behind using DB is that there is a ton of software that will help you read it in nice objective way. With text files you rather need to write parser/dumper, hence adding more work. Cheers, fijal On Wed, Sep 30, 2009 at 2:31 AM, Armin Rigo wrote: > Hi Holger, > > On Mon, Sep 28, 2009 at 04:38:43PM +0200, holger krekel wrote: >> why is a DB table format better for refactoring than a text file? > > I suppose it isn't intrinsically. ?The point is that as far as I know > all our previous pypy benchmarks did not even produce a text file > containing the results only, in a way that can be nicely re-read by > program and refactored. ?They just produce directly text or html files > with the results embedded in whatever presentation was deemed best at > the time. > > More than that I have no preference for a DB format versus an > easily-reparsable text file format. > > > A bientot, > > Armin. > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From arigo at tunes.org Wed Sep 30 10:46:47 2009 From: arigo at tunes.org (Armin Rigo) Date: Wed, 30 Sep 2009 10:46:47 +0200 Subject: [pypy-dev] Advice for a talk... In-Reply-To: <693bc9ab0909270019l78af72d7od2df0f0e2c395f02@mail.gmail.com> References: <693bc9ab0909270019l78af72d7od2df0f0e2c395f02@mail.gmail.com> Message-ID: <20090930084647.GB17579@code0.codespeak.net> Hi Michael, I don't know if it's a good idea to continue saying that our JIT is "Psyco-style". 
Mostly it's not :-) It's a tracing JIT now. "The prototype JIT compiler ran programs X times faster than CPython"... Assuming you are now talking about the new JIT compiler, I suppose that such impressive numbers could still be true, but more importantly it now starts to actually run larger benchmarks with "a good speed-up". I would say that it's around the speed of Psyco, even a bit better in general, except that producing the assembler from the PyPy JIT still takes far too much time for now. Other random notes: * I would kill "logic programming". * As Maciej pointed out, having "high performance GC" was moved to the background for now as our existing GCs, although admittedly simple, perform well enough. A bientot, Armin. From arigo at tunes.org Wed Sep 30 10:51:04 2009 From: arigo at tunes.org (Armin Rigo) Date: Wed, 30 Sep 2009 10:51:04 +0200 Subject: [pypy-dev] Question on Hard Switching and Soft Switching In-Reply-To: <854337.90183.qm@web112418.mail.gq1.yahoo.com> References: <854337.90183.qm@web112418.mail.gq1.yahoo.com> Message-ID: <20090930085104.GC17579@code0.codespeak.net> Hi Andrew, On Tue, Sep 29, 2009 at 09:34:19AM -0700, Andrew Francis wrote: > The terms 'hard switching' and 'soft switching' constantly appear. > 'Soft' switching is defined as cooperating switching. I will assume > this is associated with a 'stackless.schedule()' or a blocked on > channel. > > Hard switching is defined as brute-force. However what is brute-force? > Is this when Stackless is in pre-emptive mode and one is relying on > the ticks? This is a question for Stackless Python only, not PyPy, so you should post in that mailing list, not here :-) To answer it, it's not visible to the user: it's just two different implementations of switching. "Hard switching" is by actually moving part of the C stack away; "soft switching" is using a purely standard C approach. PyPy only has soft switching (but with the same Stackless interface). A bientot, Armin. From arigo at tunes.org Wed Sep 30 10:52:46 2009 From: arigo at tunes.org (Armin Rigo) Date: Wed, 30 Sep 2009 10:52:46 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <693bc9ab0909300146j5e4b123ifa1be2799e0b1731@mail.gmail.com> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> <20090928143009.GA19399@code0.codespeak.net> <20090928143843.GM15455@trillke.net> <20090930083126.GA17579@code0.codespeak.net> <693bc9ab0909300146j5e4b123ifa1be2799e0b1731@mail.gmail.com> Message-ID: <20090930085246.GD17579@code0.codespeak.net> Hi Maciej, On Wed, Sep 30, 2009 at 02:46:06AM -0600, Maciej Fijalkowski wrote: > Personally I think the best reason behind using DB is that there > is a ton of software that will help you read it in nice objective way. > With text files you rather need to write parser/dumper, hence > adding more work. Yes, and there are also arguments in favor of text files, making the end choice mostly a matter of taste and habit, I suppose. Armin. 
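For concreteness, the step Armin described earlier in this thread (store the nightly richards number somewhere structured, then extract it into a static page) is quite small either way; here is a sketch using sqlite3, with invented file and column names, purely to make the discussion concrete:

import sqlite3, time

def store_result(dbfile, revision, benchmark, seconds):
    conn = sqlite3.connect(dbfile)
    conn.execute("""CREATE TABLE IF NOT EXISTS results
                    (timestamp REAL, revision INTEGER,
                     benchmark TEXT, seconds REAL)""")
    conn.execute("INSERT INTO results VALUES (?, ?, ?, ?)",
                 (time.time(), revision, benchmark, seconds))
    conn.commit()
    conn.close()

def write_static_page(dbfile, htmlfile, benchmark="richards"):
    # re-reads the stored numbers and emits a plain html table; any later
    # refactoring of the presentation only touches this function
    conn = sqlite3.connect(dbfile)
    rows = conn.execute("SELECT revision, seconds FROM results"
                        " WHERE benchmark = ? ORDER BY revision", (benchmark,))
    lines = ["<html><body><h1>%s</h1><table>" % benchmark]
    for revision, seconds in rows:
        lines.append("<tr><td>r%d</td><td>%.3f s</td></tr>" % (revision, seconds))
    lines.append("</table></body></html>")
    conn.close()
    open(htmlfile, "w").write("\n".join(lines))

# a nightly run would then do something like:
# store_result("speed.db", 68036, "richards", 1.23)
# write_static_page("speed.db", "richards.html")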
From holger at merlinux.eu Wed Sep 30 10:52:45 2009 From: holger at merlinux.eu (holger krekel) Date: Wed, 30 Sep 2009 10:52:45 +0200 Subject: [pypy-dev] benchmarking input In-Reply-To: <693bc9ab0909300146j5e4b123ifa1be2799e0b1731@mail.gmail.com> References: <693bc9ab0909240738g41408978t6c9d69fb8e733983@mail.gmail.com> <200909251523.n8PFNP1j002726@fido.openend.se> <20090928143009.GA19399@code0.codespeak.net> <20090928143843.GM15455@trillke.net> <20090930083126.GA17579@code0.codespeak.net> <693bc9ab0909300146j5e4b123ifa1be2799e0b1731@mail.gmail.com> Message-ID: <20090930085245.GD15455@trillke.net> Hi Maciej, Armin, all, On Wed, Sep 30, 2009 at 02:46 -0600, Maciej Fijalkowski wrote: > Personally I think the best reason behind using DB is that there > is a ton of software that will help you read it in nice objective way. > With text files you rather need to write parser/dumper, hence > adding more work. all fine. As commonly discussed with Miquel i think it's best to offer scripts for injecting/retrieving data remotely and then i don't care if it is stored in a DB, Keyvalue-store or text files in a filesystem. best, holger > Cheers, > fijal > > On Wed, Sep 30, 2009 at 2:31 AM, Armin Rigo wrote: > > Hi Holger, > > > > On Mon, Sep 28, 2009 at 04:38:43PM +0200, holger krekel wrote: > >> why is a DB table format better for refactoring than a text file? > > > > I suppose it isn't intrinsically. The point is that as far as I know > > all our previous pypy benchmarks did not even produce a text file > > containing the results only, in a way that can be nicely re-read by > > program and refactored. They just produce directly text or html files > > with the results embedded in whatever presentation was deemed best at > > the time. > > > > More than that I have no preference for a DB format versus an > > easily-reparsable text file format. > > > > > > A bientot, > > > > Armin. > > _______________________________________________ > > pypy-dev at codespeak.net > > http://codespeak.net/mailman/listinfo/pypy-dev > > > -- Metaprogramming, Python, Testing: http://tetamap.wordpress.com Python, PyPy, pytest contracting: http://merlinux.eu From jonah at eecs.berkeley.edu Wed Sep 30 18:27:55 2009 From: jonah at eecs.berkeley.edu (Jeff Anderson-Lee) Date: Wed, 30 Sep 2009 09:27:55 -0700 Subject: [pypy-dev] support for 64-bit processors and eliminating global state Message-ID: <4AC3870B.40907@eecs.berkeley.edu> I'm new to pypy but would encourage the development folks to apply some focus towards two things: support for both 32 and 64-bit processors and eliminating global state including the GIL. The near future of mainstream processors is multi-core x86_64. For the short-term both 32-bit and 64-bit platforms will be around. Code that makes "naked" assumptions about word size will break and needs to be re-factored to hide the word-size dependencies. Similarly code that assumes a single thread of execution or uses a GIL to protect global state will not make efficient use of modern processors. Any language or system that cannot make the transition to 64-bit multi-core will start to lose ground to those that do. At the Parallel Computing Laboratory (UC Berkeley) one of the projects we are working on is called SEJITS which stands for Selective Embedded Just in Time Specialization. The idea is that one can extend a self-introspecting modern scripting language for calling native coded modules (e.g. C) at selected points for handling specialized operations (e.g. vector/matrix operations using tuned SIMD or CUDA code). 
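To make the "call native code at selected points" idea concrete, here is a toy of the general pattern only (emit C source for one chosen operation, compile it, load it through an FFI); it is not the SEJITS code itself, and it assumes a Unix-like machine with a C compiler available as 'cc' on the PATH:

import ctypes, os, subprocess, tempfile

C_SOURCE = """
double dot(double *a, double *b, long n) {
    long i;
    double s = 0.0;
    for (i = 0; i < n; i++)
        s += a[i] * b[i];
    return s;
}
"""

def compile_and_load(source, name):
    # write the generated C, build a shared object, load it via ctypes
    workdir = tempfile.mkdtemp()
    c_path = os.path.join(workdir, name + ".c")
    so_path = os.path.join(workdir, name + ".so")
    open(c_path, "w").write(source)
    subprocess.check_call(["cc", "-O2", "-shared", "-fPIC",
                           "-o", so_path, c_path])
    return ctypes.CDLL(so_path)

lib = compile_and_load(C_SOURCE, "dot")
lib.dot.restype = ctypes.c_double
lib.dot.argtypes = [ctypes.POINTER(ctypes.c_double),
                    ctypes.POINTER(ctypes.c_double), ctypes.c_long]

n = 4
Vec = ctypes.c_double * n
a = Vec(1.0, 2.0, 3.0, 4.0)
b = Vec(4.0, 3.0, 2.0, 1.0)
print lib.dot(a, b, n)    # 20.0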
You can see the abstract of a recent SEJITS paper at http://pmea.ac.upc.edu/program.html (session 1a) but unfortunately the paper is not online yet. Both Python and Ruby are being looked at as potential target languages for SEJITS work. Both have sufficient introspection facilities to support selective JIT operations. Python has an advantage in having been used by the scientific community for longer than Ruby with more established users. I'd love to see this work integrate with pypy. At the moment the folks involved are targeting CPython. In any case, I think the transition to multi-core/multi-threaded 64-bit machines is a potential watershed of major importance which it would behoove pypy-dev folks to keep in mind. Respectfully. Jeff Anderson-Lee From fijall at gmail.com Wed Sep 30 19:13:07 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Wed, 30 Sep 2009 11:13:07 -0600 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: <4AC3870B.40907@eecs.berkeley.edu> References: <4AC3870B.40907@eecs.berkeley.edu> Message-ID: <693bc9ab0909301013g37990d8djca947faf7ef4f70f@mail.gmail.com> Hi. In general, we try hard not to make hard assumptions, so the transition to 64 bit should be generally smooth. However, we have limited resources and we're generally volunteer run. For example, my laptop does not support 64bit, which makes it significantly harder for me to work on (that being one example). So, 64bit support is put on the second plan, but not because we plan to hardcode 32bit everywhere, but simply because our resources are limited. If you want to push for it by donating time/money you're welcome to do so. It's nice to hear what you're doing, however full paper would be much better than the abstract. Is there a way to obtain it somehow else? PS. I also did some stuff, like this, by lazily constructing numpy expressions and compiling them to assembler, so I'm personally interested in hearing more. Cheers, fijal On Wed, Sep 30, 2009 at 10:27 AM, Jeff Anderson-Lee wrote: > I'm new to pypy but would encourage the development folks to apply some > focus towards two things: support for both 32 and 64-bit processors and > eliminating global state including the GIL. > > The near future of mainstream processors is multi-core x86_64. ?For the > short-term both 32-bit and 64-bit platforms will be around. ?Code that > makes "naked" assumptions about word size will break and needs to be > re-factored to hide the word-size dependencies. ? Similarly code that > assumes a single thread of execution or uses a GIL to protect global > state will make efficient use of modern processors. ?Any language or > system that cannot make the transition to 64-bit multi-core will start > to loose ground to those that do. > > At the Parallel Computing Laboratory (UC Berkeley) one of the projects > we are working on is called SEJITS which stands for Selective Embedded > Just in Time Specialization. ?The idea is that one can extend a > self-introspecting modern scripting language for calling native coded > modules (e.g. C) at selected points for handling specialized operations > (e.g. vector/matrix operations using tuned SIMD or CUDA code). ?You can > see the abstract of a recent SEJITS paper at > http://pmea.ac.upc.edu/program.html (session 1a) but unfortunately the > paper is not online yet. > > Both Python and Ruby are being looked at as potential target languages > for SEJITS work. ?Both have sufficient introspection facilities to > support selective JIT operations. 
?Python has an advantage in having > been used by the scientific community for longer than Ruby with more > established users. ?I'd love to see this work integrate with pypy. ?At > the moment the folks involved are targeting CPython. > In any case, I think the transition to multi-core/multi-threaded 64-bit > machines is a potential watershed of major importance which it would > behoove pypy-dev folks to keep in mind. > > Respectfully. > > Jeff Anderson-Lee > > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev > From santagada at gmail.com Wed Sep 30 19:06:25 2009 From: santagada at gmail.com (Leonardo Santagada) Date: Wed, 30 Sep 2009 14:06:25 -0300 Subject: [pypy-dev] Mac osx patches Message-ID: <2BB35F3B-39BC-40F4-90ED-1974894B1B8A@gmail.com> To make compilation and asmgcroot happy on osx I made some changes, I would like to know if someone has anything against applying this stuff to trunk? Index: pypy/translator/c/gcc/trackgcroot.py =================================================================== --- pypy/translator/c/gcc/trackgcroot.py (revision 68036) +++ pypy/translator/c/gcc/trackgcroot.py (working copy) @@ -22,7 +22,11 @@ r_sectionstart = re.compile(r"\t\.("+'|'.join(OTHERSECTIONS) +").*$") r_functionstart_darwin = re.compile(r"_(\w+):\s*$") -OFFSET_LABELS = 2**30 +# darwin's ld complain about this hack +if sys.platform == 'darwin': + OFFSET_LABELS = 0 +else: + OFFSET_LABELS = 2**30 # inside functions LABEL = r'([.]?[\w$@]+)' Index: pypy/translator/platform/test/test_darwin.py =================================================================== --- pypy/translator/platform/test/test_darwin.py (revision 68036) +++ pypy/translator/platform/test/test_darwin.py (working copy) @@ -2,8 +2,8 @@ """ File containing darwin platform tests """ -import py, os -if os.name != 'darwin': +import py, sys +if sys.platform != 'darwin': py.test.skip("Darwin only") from pypy.tool.udir import udir Index: pypy/translator/platform/darwin.py =================================================================== --- pypy/translator/platform/darwin.py (revision 68036) +++ pypy/translator/platform/darwin.py (working copy) @@ -18,7 +18,7 @@ self.cc = cc def _args_for_shared(self, args): - return (self.shared_only + ['-bundle', '-undefined', 'dynamic_lookup'] + return (self.shared_only + ['-dynamiclib', '-undefined', 'dynamic_lookup'] + args) def include_dirs_for_libffi(self): -- Leonardo Santagada santagada at gmail.com From santagada at gmail.com Wed Sep 30 19:16:49 2009 From: santagada at gmail.com (Leonardo Santagada) Date: Wed, 30 Sep 2009 14:16:49 -0300 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: <4AC3870B.40907@eecs.berkeley.edu> References: <4AC3870B.40907@eecs.berkeley.edu> Message-ID: On Sep 30, 2009, at 1:27 PM, Jeff Anderson-Lee wrote: > I'm new to pypy but would encourage the development folks to apply > some > focus towards two things: support for both 32 and 64-bit processors > and > eliminating global state including the GIL. > > The near future of mainstream processors is multi-core x86_64. For > the > short-term both 32-bit and 64-bit platforms will be around. Code that > makes "naked" assumptions about word size will break and needs to be > re-factored to hide the word-size dependencies. Similarly code that > assumes a single thread of execution or uses a GIL to protect global > state will make efficient use of modern processors. 
Any language or > system that cannot make the transition to 64-bit multi-core will start > to loose ground to those that do. PyPy does support 32 and 64 bit processors, the jit for x86_64 is not ready though but this is just a problem of time, when the 32 bit jit is ready doing a 64bit one is simple (but many manhours of work). The GIL in pypy is only there because no one proposed anything to change that, pypy already does not depend on reference counting but can use a garbage collector so it is probably way easier to change than CPython. > At the Parallel Computing Laboratory (UC Berkeley) one of the projects > we are working on is called SEJITS which stands for Selective Embedded > Just in Time Specialization. The idea is that one can extend a > self-introspecting modern scripting language for calling native coded > modules (e.g. C) at selected points for handling specialized > operations > (e.g. vector/matrix operations using tuned SIMD or CUDA code). You > can > see the abstract of a recent SEJITS paper at > http://pmea.ac.upc.edu/program.html (session 1a) but unfortunately the > paper is not online yet. > > Both Python and Ruby are being looked at as potential target languages > for SEJITS work. Both have sufficient introspection facilities to > support selective JIT operations. Python has an advantage in having > been used by the scientific community for longer than Ruby with more > established users. I'd love to see this work integrate with pypy. At > the moment the folks involved are targeting CPython. > In any case, I think the transition to multi-core/multi-threaded 64- > bit > machines is a potential watershed of major importance which it would > behoove pypy-dev folks to keep in mind. I haven't read the paper but pypy does already have a JIT, maybe if you are interested in it you can read more on the pypy blog http://morepypy.blogspot.com/ . Probably someone with more experience with both pypy and the JIT is going to answer this email so I will not try to explain it in here. -- Leonardo Santagada santagada at gmail.com From fijall at gmail.com Wed Sep 30 19:22:24 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Wed, 30 Sep 2009 11:22:24 -0600 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: References: <4AC3870B.40907@eecs.berkeley.edu> Message-ID: <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> Hi Leonardo. I think you're not reading this mail in details, let me explain. On Wed, Sep 30, 2009 at 11:16 AM, Leonardo Santagada wrote: > > On Sep 30, 2009, at 1:27 PM, Jeff Anderson-Lee wrote: > >> I'm new to pypy but would encourage the development folks to apply >> some >> focus towards two things: support for both 32 and 64-bit processors >> and >> eliminating global state including the GIL. >> >> The near future of mainstream processors is multi-core x86_64. ?For >> the >> short-term both 32-bit and 64-bit platforms will be around. ?Code that >> makes "naked" assumptions about word size will break and needs to be >> re-factored to hide the word-size dependencies. ? Similarly code that >> assumes a single thread of execution or uses a GIL to protect global >> state will make efficient use of modern processors. ?Any language or >> system that cannot make the transition to 64-bit multi-core will start >> to loose ground to those that do. 
> > PyPy does support 32 and 64 bit processors, the jit for x86_64 is not > ready though but this is just a problem of time, when the 32 bit jit > is ready doing a 64bit one is simple (but many manhours of work). It's not that many hours needed to have 64bit JIT as far as I know. I did a lot of refactoring recently so it should be much easier. Also we don't have a 64bit buildbot, which means 64bit support might rot over time, we don't know and it's not officially supported. > > The GIL in pypy is only there because no one proposed anything to > change that, pypy already does not depend on reference counting but > can use a garbage collector so it is probably way easier to change > than CPython. It's true that we don't have a good story here and we need one. Something a'la Jython would work (unlike in CPython), but it's work. > > >> At the Parallel Computing Laboratory (UC Berkeley) one of the projects >> we are working on is called SEJITS which stands for Selective Embedded >> Just in Time Specialization. ?The idea is that one can extend a >> self-introspecting modern scripting language for calling native coded >> modules (e.g. C) at selected points for handling specialized >> operations >> (e.g. vector/matrix operations using tuned SIMD or CUDA code). ?You >> can >> see the abstract of a recent SEJITS paper at >> http://pmea.ac.upc.edu/program.html (session 1a) but unfortunately the >> paper is not online yet. >> >> Both Python and Ruby are being looked at as potential target languages >> for SEJITS work. ?Both have sufficient introspection facilities to >> support selective JIT operations. ?Python has an advantage in having >> been used by the scientific community for longer than Ruby with more >> established users. ?I'd love to see this work integrate with pypy. ?At >> the moment the folks involved are targeting CPython. >> In any case, I think the transition to multi-core/multi-threaded 64- >> bit >> machines is a potential watershed of major importance which it would >> behoove pypy-dev folks to keep in mind. > > > I haven't read the paper but pypy does already have a JIT, maybe if > you are interested in it you can read more on the pypy blog http://morepypy.blogspot.com/ > . Probably someone with more experience with both pypy and the JIT is > going to answer this email so I will not try to explain it in here. > Note that's not precisely what Jeff wants. General purpose JIT is nice, but it's rather hard to imagine how it'll generate efficient CUDA code automatically, without hints from the user. Since PyPy actually has a jit-generator, it should be far easier to implement this in PyPy than somewhere else (you can write code that is "interpreter" and JIT will be automatically created for it), however it's still work to get nice paralellizable (or parallelizing?) framework. Cheers, fijal From jonah at eecs.berkeley.edu Wed Sep 30 20:19:06 2009 From: jonah at eecs.berkeley.edu (Jeff Anderson-Lee) Date: Wed, 30 Sep 2009 11:19:06 -0700 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> References: <4AC3870B.40907@eecs.berkeley.edu> <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> Message-ID: <4AC3A11A.6050506@eecs.berkeley.edu> Maciej Fijalkowski wrote: > Hi Leonardo. I think you're not reading this mail in details, let me explain. > > On Wed, Sep 30, 2009 at 11:16 AM, Leonardo Santagada > wrote: > It's not that many hours needed to have 64bit JIT as far as I know. 
> I did a lot of refactoring recently so it should be much easier. > Also we don't have a 64bit buildbot, which means 64bit support might > rot over time, > we don't know and it's not officially supported. > It's great to hear that you are already working in the 64-bit direction. Most modern laptops have 64-bit compatible chips. My two-year old centrino duo does. A dual boot solution (e.g. with Ubunto for x86_64) can do the trick for an inexpensive development environment. That's not the same as a buildbot though. Some of the original 64-bit processors are nearing retirement age though, so perhaps some kind soul may see this note and volunteer an old system that has been replaced to support pypy-dev. Just sayin' it's important. Glad you seem to think so too. >> The GIL in pypy is only there because no one proposed anything to >> change that, pypy already does not depend on reference counting but >> can use a garbage collector so it is probably way easier to change >> than CPython. >> > > It's true that we don't have a good story here and we need one. Something > a'la Jython would work (unlike in CPython), but it's work. > The last time I looked, Hoard didn't support x86_64 although it did seem to work for threaded environments fairly efficiently if I recall. Having a separate arena for each thread (or each virtual processor) helps to avoid a lot of locking for frequent/small allocations in a VM. That may mean factoring out the allocation so that it calls something like myalloc(pool,size) rather than just malloc(size). I read that pypy was trying to factor out the GC code to support multiple back-ends. Having an API that supports multiple concurrent allocator pools can be useful in that regard. Similarly, a JIT can be modularized so as not to depend on globals, but have a JitContext structure: jit_xxx(struct JitContext *jc, ...) That allows jitting to be going on in multiple threads at once. I looked at libjit and it didn't have that structure, meaning that jit processing of functions was a potential bottleneck. I haven't got deep enough into pypy yet to know whether or not that is the case for you folks. In fact, I'd like to encourage the use of a global-less coding style for the sake of improved parallelization. Every global is another reason for a GIL. >> I haven't read the paper but pypy does already have a JIT, maybe if >> you are interested in it you can read more on the pypy blog http://morepypy.blogspot.com/ >> . Probably someone with more experience with both pypy and the JIT is >> going to answer this email so I will not try to explain it in here. >> >> I'm trying to get the authors to post the paper since it has already been presented. When they do I'll forward a link. > > Note that's not precisely what Jeff wants. General purpose JIT is nice, but > it's rather hard to imagine how it'll generate efficient CUDA code > automatically, > without hints from the user. Since PyPy actually has a jit-generator, it should > be far easier to implement this in PyPy than somewhere else (you can write > code that is "interpreter" and JIT will be automatically created for > it), however > it's still work to get nice paralellizable (or parallelizing?) framework. > Yes. The SEJITS approach can be used even with a Python that doesn't have a JIT as long as it has a suitable foreign function interface. The trick is to interpose in the AST processing to recognize and handle "selective" patterns in the tree. 
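A Python-level caricature of the two patterns mentioned a few paragraphs up, per-thread allocation pools reached through something like myalloc(pool, size) and an explicit JitContext instead of module-level globals; the classes below are invented for illustration and have nothing to do with PyPy's actual GC or JIT:

import threading

class Arena(object):
    # one allocation pool; a real arena would carve buffers out of a big
    # preallocated chunk, this one just hands out fresh bytearrays
    def __init__(self):
        self.allocated = 0
    def alloc(self, size):
        self.allocated += size
        return bytearray(size)

_tls = threading.local()

def myalloc(size):
    # each thread lazily gets its own arena, so the common allocation path
    # touches no shared state and needs no lock
    arena = getattr(_tls, "arena", None)
    if arena is None:
        arena = _tls.arena = Arena()
    return arena.alloc(size)

class JitContext(object):
    # all code-generation state lives on an explicitly passed object,
    # so several threads can be compiling at the same time
    def __init__(self):
        self.instructions = []
    def emit(self, insn):
        self.instructions.append(insn)

def compile_trace(context, trace):
    for insn in trace:
        context.emit(insn)
    return context.instructions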
The current system actually generates C-code on-the fly then compiles and links it in with FFI hooks so that subsequent calls can access it more directly. This is obviously only worth doing for code for which the native code is substantially faster and/or will be called sufficiently often. If I had the cash on hand I would gladly support your work with a donation. Unfortunately I don't have sufficient personal resources nor access to corporate funds. (As a research group, we get our funds from outside donations, not dole it out!) I think it's a great project though, and if cheer leading counts, you definitely have my support in that regard. From glavoie at gmail.com Wed Sep 30 21:28:09 2009 From: glavoie at gmail.com (Gabriel Lavoie) Date: Wed, 30 Sep 2009 15:28:09 -0400 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> References: <4AC3870B.40907@eecs.berkeley.edu> <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> Message-ID: Hello Maciej, actually you have a 64 bits buildbot, mine. It's under FreeBSD but it's still 64 bits. Cheers, Gabriel 2009/9/30 Maciej Fijalkowski : > Hi Leonardo. I think you're not reading this mail in details, let me explain. > > On Wed, Sep 30, 2009 at 11:16 AM, Leonardo Santagada > wrote: >> >> On Sep 30, 2009, at 1:27 PM, Jeff Anderson-Lee wrote: >> >>> I'm new to pypy but would encourage the development folks to apply >>> some >>> focus towards two things: support for both 32 and 64-bit processors >>> and >>> eliminating global state including the GIL. >>> >>> The near future of mainstream processors is multi-core x86_64. ?For >>> the >>> short-term both 32-bit and 64-bit platforms will be around. ?Code that >>> makes "naked" assumptions about word size will break and needs to be >>> re-factored to hide the word-size dependencies. ? Similarly code that >>> assumes a single thread of execution or uses a GIL to protect global >>> state will make efficient use of modern processors. ?Any language or >>> system that cannot make the transition to 64-bit multi-core will start >>> to loose ground to those that do. >> >> PyPy does support 32 and 64 bit processors, the jit for x86_64 is not >> ready though but this is just a problem of time, when the 32 bit jit >> is ready doing a 64bit one is simple (but many manhours of work). > > It's not that many hours needed to have 64bit JIT as far as I know. > I did a lot of refactoring recently so it should be much easier. > Also we don't have a 64bit buildbot, which means 64bit support might > rot over time, > we don't know and it's not officially supported. > >> >> The GIL in pypy is only there because no one proposed anything to >> change that, pypy already does not depend on reference counting but >> can use a garbage collector so it is probably way easier to change >> than CPython. > > It's true that we don't have a good story here and we need one. Something > a'la Jython would work (unlike in CPython), but it's work. > >> >> >>> At the Parallel Computing Laboratory (UC Berkeley) one of the projects >>> we are working on is called SEJITS which stands for Selective Embedded >>> Just in Time Specialization. ?The idea is that one can extend a >>> self-introspecting modern scripting language for calling native coded >>> modules (e.g. C) at selected points for handling specialized >>> operations >>> (e.g. vector/matrix operations using tuned SIMD or CUDA code). 
?You >>> can >>> see the abstract of a recent SEJITS paper at >>> http://pmea.ac.upc.edu/program.html (session 1a) but unfortunately the >>> paper is not online yet. >>> >>> Both Python and Ruby are being looked at as potential target languages >>> for SEJITS work. ?Both have sufficient introspection facilities to >>> support selective JIT operations. ?Python has an advantage in having >>> been used by the scientific community for longer than Ruby with more >>> established users. ?I'd love to see this work integrate with pypy. ?At >>> the moment the folks involved are targeting CPython. >>> In any case, I think the transition to multi-core/multi-threaded 64- >>> bit >>> machines is a potential watershed of major importance which it would >>> behoove pypy-dev folks to keep in mind. >> >> >> I haven't read the paper but pypy does already have a JIT, maybe if >> you are interested in it you can read more on the pypy blog http://morepypy.blogspot.com/ >> . Probably someone with more experience with both pypy and the JIT is >> going to answer this email so I will not try to explain it in here. >> > > Note that's not precisely what Jeff wants. General purpose JIT is nice, but > it's rather hard to imagine how it'll generate efficient CUDA code > automatically, > without hints from the user. Since PyPy actually has a jit-generator, it should > be far easier to implement this in PyPy than somewhere else (you can write > code that is "interpreter" and JIT will be automatically created for > it), however > it's still work to get nice paralellizable (or parallelizing?) framework. > > Cheers, > fijal > _______________________________________________ > pypy-dev at codespeak.net > http://codespeak.net/mailman/listinfo/pypy-dev -- Gabriel Lavoie glavoie at gmail.com From fijall at gmail.com Wed Sep 30 21:32:16 2009 From: fijall at gmail.com (Maciej Fijalkowski) Date: Wed, 30 Sep 2009 13:32:16 -0600 Subject: [pypy-dev] support for 64-bit processors and eliminating global state In-Reply-To: References: <4AC3870B.40907@eecs.berkeley.edu> <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> Message-ID: <693bc9ab0909301232p17c0fbd2ge904f0a2f6538ec0@mail.gmail.com> Yeah, but since FreeBSD is not a supported platform we can't really say that :-) Of course we appreciate that, no doubt Cheers, fijal On Wed, Sep 30, 2009 at 1:28 PM, Gabriel Lavoie wrote: > Hello Maciej, > ? ?actually you have a 64 bits buildbot, mine. It's under FreeBSD but > it's still 64 bits. > > Cheers, > > Gabriel > > 2009/9/30 Maciej Fijalkowski : >> Hi Leonardo. I think you're not reading this mail in details, let me explain. >> >> On Wed, Sep 30, 2009 at 11:16 AM, Leonardo Santagada >> wrote: >>> >>> On Sep 30, 2009, at 1:27 PM, Jeff Anderson-Lee wrote: >>> >>>> I'm new to pypy but would encourage the development folks to apply >>>> some >>>> focus towards two things: support for both 32 and 64-bit processors >>>> and >>>> eliminating global state including the GIL. >>>> >>>> The near future of mainstream processors is multi-core x86_64. ?For >>>> the >>>> short-term both 32-bit and 64-bit platforms will be around. ?Code that >>>> makes "naked" assumptions about word size will break and needs to be >>>> re-factored to hide the word-size dependencies. ? Similarly code that >>>> assumes a single thread of execution or uses a GIL to protect global >>>> state will make efficient use of modern processors. ?Any language or >>>> system that cannot make the transition to 64-bit multi-core will start >>>> to loose ground to those that do. 
>>>
>>> PyPy does support 32- and 64-bit processors; the JIT for x86_64 is not ready yet, but that is just a matter of time: once the 32-bit JIT is ready, doing a 64-bit one is simple (though many man-hours of work).
>>
>> It's not that many hours of work to get a 64-bit JIT, as far as I know. I did a lot of refactoring recently, so it should be much easier. Also, we don't have a 64-bit buildbot, which means 64-bit support might rot over time; we don't know, and it's not officially supported.
>>
>>>
>>> The GIL in pypy is only there because no one proposed anything to change that; pypy already does not depend on reference counting but can use a garbage collector, so it is probably way easier to change than in CPython.
>>
>> It's true that we don't have a good story here and we need one. Something à la Jython would work (unlike in CPython), but it's work.
>>
>>>
>>>> At the Parallel Computing Laboratory (UC Berkeley) one of the projects we are working on is called SEJITS, which stands for Selective Embedded Just-in-Time Specialization. The idea is that one can extend a self-introspecting modern scripting language for calling native coded modules (e.g. C) at selected points for handling specialized operations (e.g. vector/matrix operations using tuned SIMD or CUDA code). You can see the abstract of a recent SEJITS paper at http://pmea.ac.upc.edu/program.html (session 1a), but unfortunately the paper is not online yet.
>>>>
>>>> Both Python and Ruby are being looked at as potential target languages for SEJITS work. Both have sufficient introspection facilities to support selective JIT operations. Python has an advantage in having been used by the scientific community for longer than Ruby, with more established users. I'd love to see this work integrate with pypy. At the moment the folks involved are targeting CPython.
>>>> In any case, I think the transition to multi-core/multi-threaded 64-bit machines is a potential watershed of major importance which it would behoove pypy-dev folks to keep in mind.
>>>
>>> I haven't read the paper, but pypy does already have a JIT; maybe if you are interested in it you can read more on the pypy blog, http://morepypy.blogspot.com/. Probably someone with more experience with both pypy and the JIT is going to answer this email, so I will not try to explain it here.
>>>
>>
>> Note that's not precisely what Jeff wants. A general-purpose JIT is nice, but it's rather hard to imagine how it would generate efficient CUDA code automatically, without hints from the user. Since PyPy actually has a jit-generator, it should be far easier to implement this in PyPy than anywhere else (you can write code that is an "interpreter" and a JIT will be automatically created for it); however, it's still work to get a nice parallelizable (or parallelizing?) framework.
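The jit-generator remark deserves a concrete illustration. In PyPy you do not write the JIT by hand: you write an interpreter in RPython and annotate where its dispatch loop and its backward jumps are, and the translation toolchain derives a tracing JIT from those hints. The sketch below is not code from PyPy; it is a toy bytecode interpreter with invented opcodes, showing roughly the shape of the hints around the time of this thread (the exact module path and signatures may differ slightly between revisions, and the hint calls are no-ops when the code runs untranslated).

    # Hypothetical toy interpreter; hint names follow pypy.rlib.jit as of
    # the pyjitpl5 merge, and the bytecode language is made up.
    from pypy.rlib.jit import JitDriver

    # "Green" variables identify a position in the user's program (the
    # program counter and the bytecode string); "red" variables are the
    # rest of the interpreter state.
    jitdriver = JitDriver(greens=['pc', 'bytecode'], reds=['acc'])

    def interpret(bytecode, acc):
        # Toy language: '+' increments, '-' decrements, '[' ... ']' repeats
        # while the accumulator is non-zero (no nesting, to keep it short).
        pc = 0
        while pc < len(bytecode):
            jitdriver.jit_merge_point(pc=pc, bytecode=bytecode, acc=acc)
            op = bytecode[pc]
            if op == '+':
                acc += 1
            elif op == '-':
                acc -= 1
            elif op == ']' and acc != 0:
                # A loop in the *user* program closes here: jump back and
                # tell the JIT, so it can start tracing this loop.
                pc = bytecode.rfind('[', 0, pc) + 1
                jitdriver.can_enter_jit(pc=pc, bytecode=bytecode, acc=acc)
                continue
            pc += 1
        return acc

Untranslated, this runs as a plain interpreter; translated with the JIT enabled, the generated machine code traces the user-level loop between '[' and ']' rather than the interpreter loop itself, which is exactly the "write an interpreter, get a JIT" point above.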
>>
>> Cheers,
>> fijal
>> _______________________________________________
>> pypy-dev at codespeak.net
>> http://codespeak.net/mailman/listinfo/pypy-dev
>
>
> --
> Gabriel Lavoie
> glavoie at gmail.com
>

From glavoie at gmail.com  Wed Sep 30 22:01:25 2009
From: glavoie at gmail.com (Gabriel Lavoie)
Date: Wed, 30 Sep 2009 16:01:25 -0400
Subject: [pypy-dev] support for 64-bit processors and eliminating global state
In-Reply-To: <693bc9ab0909301232p17c0fbd2ge904f0a2f6538ec0@mail.gmail.com>
References: <4AC3870B.40907@eecs.berkeley.edu> <693bc9ab0909301022n3c0b7b1bq8e0d9aacde54e3f8@mail.gmail.com> <693bc9ab0909301232p17c0fbd2ge904f0a2f6538ec0@mail.gmail.com>
Message-ID:

Actually, what are the officially supported platforms?

2009/9/30 Maciej Fijalkowski:
> Yeah, but since FreeBSD is not a supported platform we can't really say that :-) Of course we appreciate that, no doubt.
>
> Cheers,
> fijal
>
> On Wed, Sep 30, 2009 at 1:28 PM, Gabriel Lavoie wrote:
>> Hello Maciej,
>>     actually you have a 64-bit buildbot, mine. It's under FreeBSD, but it's still 64 bits.
>>
>> Cheers,
>>
>> Gabriel
>>
>> 2009/9/30 Maciej Fijalkowski:
>>> Hi Leonardo. I think you're not reading this mail in detail, let me explain.
>>>
>>> On Wed, Sep 30, 2009 at 11:16 AM, Leonardo Santagada wrote:
>>>>
>>>> On Sep 30, 2009, at 1:27 PM, Jeff Anderson-Lee wrote:
>>>>
>>>>> I'm new to pypy but would encourage the development folks to apply some focus towards two things: support for both 32- and 64-bit processors, and eliminating global state including the GIL.
>>>>>
>>>>> The near future of mainstream processors is multi-core x86_64. For the short term, both 32-bit and 64-bit platforms will be around. Code that makes "naked" assumptions about word size will break and needs to be re-factored to hide the word-size dependencies. Similarly, code that assumes a single thread of execution or uses a GIL to protect global state will not make efficient use of modern processors. Any language or system that cannot make the transition to 64-bit multi-core will start to lose ground to those that do.
>>>>
>>>> PyPy does support 32- and 64-bit processors; the JIT for x86_64 is not ready yet, but that is just a matter of time: once the 32-bit JIT is ready, doing a 64-bit one is simple (though many man-hours of work).
>>>
>>> It's not that many hours of work to get a 64-bit JIT, as far as I know. I did a lot of refactoring recently, so it should be much easier. Also, we don't have a 64-bit buildbot, which means 64-bit support might rot over time; we don't know, and it's not officially supported.
>>>
>>>>
>>>> The GIL in pypy is only there because no one proposed anything to change that; pypy already does not depend on reference counting but can use a garbage collector, so it is probably way easier to change than in CPython.
>>>
>>> It's true that we don't have a good story here and we need one. Something à la Jython would work (unlike in CPython), but it's work.
>>>
>>>>
>>>>> At the Parallel Computing Laboratory (UC Berkeley) one of the projects we are working on is called SEJITS, which stands for Selective Embedded Just-in-Time Specialization. The idea is that one can extend a self-introspecting modern scripting language for calling native coded modules (e.g. C) at selected points for handling specialized operations (e.g. vector/matrix operations using tuned SIMD or CUDA code).
>>>>> You can see the abstract of a recent SEJITS paper at http://pmea.ac.upc.edu/program.html (session 1a), but unfortunately the paper is not online yet.
>>>>>
>>>>> Both Python and Ruby are being looked at as potential target languages for SEJITS work. Both have sufficient introspection facilities to support selective JIT operations. Python has an advantage in having been used by the scientific community for longer than Ruby, with more established users. I'd love to see this work integrate with pypy. At the moment the folks involved are targeting CPython.
>>>>> In any case, I think the transition to multi-core/multi-threaded 64-bit machines is a potential watershed of major importance which it would behoove pypy-dev folks to keep in mind.
>>>>
>>>> I haven't read the paper, but pypy does already have a JIT; maybe if you are interested in it you can read more on the pypy blog, http://morepypy.blogspot.com/. Probably someone with more experience with both pypy and the JIT is going to answer this email, so I will not try to explain it here.
>>>>
>>>
>>> Note that's not precisely what Jeff wants. A general-purpose JIT is nice, but it's rather hard to imagine how it would generate efficient CUDA code automatically, without hints from the user. Since PyPy actually has a jit-generator, it should be far easier to implement this in PyPy than anywhere else (you can write code that is an "interpreter" and a JIT will be automatically created for it); however, it's still work to get a nice parallelizable (or parallelizing?) framework.
>>>
>>> Cheers,
>>> fijal
>>> _______________________________________________
>>> pypy-dev at codespeak.net
>>> http://codespeak.net/mailman/listinfo/pypy-dev
>>
>> --
>> Gabriel Lavoie
>> glavoie at gmail.com
>>
>

-- 
Gabriel Lavoie
glavoie at gmail.com
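Finally, to make the SEJITS idea discussed in this thread a bit more concrete, here is a small pure-Python sketch of the "selective" part: only functions that opt in are intercepted, and only when a registered specializer claims the call; otherwise the ordinary interpreter path runs. Everything here (specializable, register_specializer, _dot_fast) is invented for the illustration, and the "fast" path is just another Python function standing in for the generated and compiled C/SIMD/CUDA code a real SEJITS system would produce.

    # Not SEJITS code: a made-up, pure-Python stand-in for the idea.
    _specializers = {}

    def register_specializer(name, predicate, fast_impl):
        # 'predicate' inspects the actual arguments and decides whether the
        # specialized version applies; 'fast_impl' is the replacement.
        _specializers.setdefault(name, []).append((predicate, fast_impl))

    def specializable(func):
        # Decorator: try each registered specializer, else fall back to the
        # original pure-Python body.
        def wrapper(*args):
            for predicate, fast_impl in _specializers.get(func.__name__, []):
                if predicate(*args):
                    return fast_impl(*args)
            return func(*args)
        return wrapper

    @specializable
    def dot(xs, ys):
        # Reference implementation, kept for the cases no specializer covers.
        total = 0.0
        for x, y in zip(xs, ys):
            total += x * y
        return total

    def _dot_fast(xs, ys):
        # Stand-in for generated native code; here just a tighter expression.
        return sum(x * y for x, y in zip(xs, ys))

    register_specializer('dot',
                         lambda xs, ys: isinstance(xs, list) and isinstance(ys, list),
                         _dot_fast)

    if __name__ == "__main__":
        print dot([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])   # prints 32.0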